Merge "Check that the String class is not movable in String.equals intrinsics."
diff --git a/.vpython b/.vpython
new file mode 100644
index 0000000..ed00723
--- /dev/null
+++ b/.vpython
@@ -0,0 +1,25 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+#   https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+#   vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+#   https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
diff --git a/Android.bp b/Android.bp
deleted file mode 100644
index 34a6469..0000000
--- a/Android.bp
+++ /dev/null
@@ -1,56 +0,0 @@
-// TODO: These should be handled with transitive static library dependencies
-art_static_dependencies = [
-    // Note: the order is important because of static linking resolution.
-    "libziparchive",
-    "libnativehelper",
-    "libnativebridge",
-    "libnativeloader",
-    "libsigchain_dummy",
-    "liblog",
-    "libz",
-    "libbacktrace",
-    "libcutils",
-    "libunwindstack",
-    "libutils",
-    "libbase",
-    "liblz4",
-    "liblzma",
-    "libmetricslogger_static",
-]
-
-subdirs = [
-    "adbconnection",
-    "benchmark",
-    "build",
-    "cmdline",
-    "compiler",
-    "dalvikvm",
-    "dex2oat",
-    "dexdump",
-    "dexlayout",
-    "dexlist",
-    "dexoptanalyzer",
-    "disassembler",
-    "dt_fd_forward",
-    "dt_fd_forward/export",
-    "imgdiag",
-    "libartbase",
-    "libdexfile",
-    "libprofile",
-    "oatdump",
-    "openjdkjvm",
-    "openjdkjvmti",
-    "patchoat",
-    "profman",
-    "runtime",
-    "sigchainlib",
-    "simulator",
-    "test",
-    "tools",
-    "tools/breakpoint-logger",
-    "tools/cpp-define-generator",
-    "tools/dmtracedump",
-    "tools/hiddenapi",
-    "tools/titrace",
-    "tools/wrapagentproperties",
-]
diff --git a/Android.mk b/Android.mk
index 7852be5..526cd59 100644
--- a/Android.mk
+++ b/Android.mk
@@ -31,13 +31,8 @@
 .PHONY: clean-oat-host
 clean-oat-host:
 	find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f
-ifneq ($(TMPDIR),)
-	rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/*
+	rm -rf $(TMPDIR)/*/test-*/dalvik-cache/*
 	rm -rf $(TMPDIR)/android-data/dalvik-cache/*
-else
-	rm -rf /tmp/$(USER)/test-*/dalvik-cache/*
-	rm -rf /tmp/android-data/dalvik-cache/*
-endif
 
 .PHONY: clean-oat-target
 clean-oat-target:
@@ -68,7 +63,6 @@
 include $(art_path)/tools/amm/Android.mk
 include $(art_path)/tools/dexfuzz/Android.mk
 include $(art_path)/tools/veridex/Android.mk
-include $(art_path)/libart_fake/Android.mk
 
 ART_HOST_DEPENDENCIES := \
   $(ART_HOST_EXECUTABLES) \
@@ -98,6 +92,8 @@
 include $(art_path)/build/Android.gtest.mk
 include $(art_path)/test/Android.run-test.mk
 
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TEST_TARGET_GTEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES)
+
 # Make sure /system is writable on the device.
 TEST_ART_ADB_ROOT_AND_REMOUNT := \
     ($(ADB) root && \
@@ -324,6 +320,53 @@
 
 
 #######################
+# Android Runtime APEX.
+
+include $(CLEAR_VARS)
+
+# The Android Runtime APEX comes in two flavors:
+# - the release module (`com.android.runtime.release`), containing
+#   only "release" artifacts;
+# - the debug module (`com.android.runtime.debug`), containing both
+#   "release" and "debug" artifacts, as well as additional tools.
+#
+# The Android Runtime APEX module (`com.android.runtime`) is an
+# "alias" for one of the previous modules. By default, "user" build
+# variants contain the release module, while "userdebug" and "eng"
+# build variants contain the debug module. However, if
+# `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is defined, it overrides
+# the previous logic:
+# - if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is set to `false`, the
+#   build will include the release module (whatever the build
+#   variant);
+# - if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is set to `true`, the
+#   build will include the debug module (whatever the build variant).
+
+art_target_include_debug_build := $(PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD)
+ifneq (false,$(art_target_include_debug_build))
+  ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+    art_target_include_debug_build := true
+  endif
+endif
+ifeq (true,$(art_target_include_debug_build))
+  # Module with both release and debug variants, as well as
+  # additional tools.
+  TARGET_RUNTIME_APEX := com.android.runtime.debug
+else
+  # Release module (without debug variants nor tools).
+  TARGET_RUNTIME_APEX := com.android.runtime.release
+endif
+
+LOCAL_MODULE := com.android.runtime
+LOCAL_REQUIRED_MODULES := $(TARGET_RUNTIME_APEX)
+
+# Clear locally used variable.
+art_target_include_debug_build :=
+
+include $(BUILD_PHONY_PACKAGE)
+
+
+#######################
 # Fake packages for ART
 
 # The art-runtime package depends on the core ART libraries and binaries. It exists so we can
@@ -341,13 +384,9 @@
     libart-compiler \
     libopenjdkjvm \
     libopenjdkjvmti \
-    patchoat \
     profman \
     libadbconnection \
 
-# For nosy apps, we provide a fake library that avoids namespace issues and gives some warnings.
-LOCAL_REQUIRED_MODULES += libart_fake
-
 # Potentially add in debug variants:
 #
 # * We will never add them if PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD = false.
@@ -367,7 +406,6 @@
     libopenjdkd \
     libopenjdkjvmd \
     libopenjdkjvmtid \
-    patchoatd \
     profmand \
     libadbconnectiond \
 
@@ -427,7 +465,7 @@
 define build-art-hiddenapi
 $(shell if [ ! -d frameworks/base ]; then \
   mkdir -p ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING; \
-	touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-{blacklist,dark-greylist,light-greylist}.txt; \
+	touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-flags.csv; \
   fi;)
 endef
 
@@ -445,20 +483,61 @@
 build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS)
 
 ########################################################################
+# Workaround for not using symbolic links for linker and bionic libraries
+# in a minimal setup (eg buildbot or golem).
+########################################################################
+
+PRIVATE_BIONIC_FILES := \
+  bin/bootstrap/linker \
+  bin/bootstrap/linker64 \
+  lib/bootstrap/libc.so \
+  lib/bootstrap/libm.so \
+  lib/bootstrap/libdl.so \
+  lib64/bootstrap/libc.so \
+  lib64/bootstrap/libm.so \
+  lib64/bootstrap/libdl.so
+
+.PHONY: art-bionic-files
+art-bionic-files: libc.bootstrap libdl.bootstrap libm.bootstrap linker
+	for f in $(PRIVATE_BIONIC_FILES); do \
+	  tf=$(TARGET_OUT)/$$f; \
+	  if [ -f $$tf ]; then cp -f $$tf $$(echo $$tf | sed 's,bootstrap/,,'); fi; \
+	done
+
+########################################################################
 # Phony target for only building what go/lem requires for pushing ART on /data.
 
 .PHONY: build-art-target-golem
 # Also include libartbenchmark, we always include it when running golem.
 # libstdc++ is needed when building for ART_TARGET_LINUX.
+#
+# Also include the bootstrap Bionic libraries (libc, libdl, libm).
+# These are required as the "main" libc, libdl, and libm have moved to
+# the Runtime APEX. This is a temporary change needed until Golem
+# fully supports the Runtime APEX.
+# TODO(b/121117762): Remove this when the ART Buildbot and Golem have
+# full support for the Runtime APEX.
+#
+# Also include a copy of the ICU .dat prebuilt files in
+# /system/etc/icu on target (see module `icu-data-art-test`), so that
+# it can be found even if the Runtime APEX is not available, by setting
+# the environment variable `ART_TEST_ANDROID_RUNTIME_ROOT` to
+# "/system" on device. This is a temporary change needed until Golem
+# fully supports the Runtime APEX.
+# TODO(b/121117762): Remove this when the ART Buildbot and Golem have
+# full support for the Runtime APEX.
 ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so
-build-art-target-golem: dex2oat dalvikvm patchoat linker libstdc++ \
+build-art-target-golem: dex2oat dalvikvm linker libstdc++ \
                         $(TARGET_OUT_EXECUTABLES)/art \
                         $(TARGET_OUT)/etc/public.libraries.txt \
                         $(ART_TARGET_DEX_DEPENDENCIES) \
                         $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES) \
                         $(ART_TARGET_SHARED_LIBRARY_BENCHMARK) \
                         $(TARGET_CORE_IMG_OUT_BASE).art \
-                        $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art
+                        $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art \
+                        libc.bootstrap libdl.bootstrap libm.bootstrap \
+                        icu-data-art-test \
+                        art-bionic-files
 	# remove debug libraries from public.libraries.txt because golem builds
 	# won't have it.
 	sed -i '/libartd.so/d' $(TARGET_OUT)/etc/public.libraries.txt
@@ -486,7 +565,7 @@
 build-art-host-tests:   build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
 
 .PHONY: build-art-target-tests
-build-art-target-tests:   build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
+build-art-target-tests:   build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
 
 ########################################################################
 # targets to switch back and forth from libdvm to libart
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 341df78..e28ce2b 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2007 The Android Open Source Project
+# Copyright (C) 2014 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -50,6 +50,9 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libart_*)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libartd_*)
 
+# Old Android Runtime APEX package, before the introduction of "release" and "debug" packages.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex/com.android.runtime.apex)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 7e492c7..60ad35c 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,5 +1,4 @@
 [Hook Scripts]
-check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
 check_generated_tests_up_to_date = tools/test_presubmit.py
 
 [Builtin Hooks]
diff --git a/adbconnection/Android.bp b/adbconnection/Android.bp
index 95fc274..5f78278 100644
--- a/adbconnection/Android.bp
+++ b/adbconnection/Android.bp
@@ -30,11 +30,6 @@
         "libbase",
     ],
     target: {
-        android: {
-            shared_libs: [
-                "libcutils",
-            ],
-        },
         host: {
         },
         darwin: {
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index ad94148..e1b5b62 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -20,13 +20,15 @@
 
 #include "android-base/endian.h"
 #include "android-base/stringprintf.h"
+#include "base/file_utils.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/socket_peer_is_trusted.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_env_ext.h"
 #include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
 #include "runtime-inl.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
@@ -38,10 +40,6 @@
 
 #include "poll.h"
 
-#ifdef ART_TARGET_ANDROID
-#include "cutils/sockets.h"
-#endif
-
 #include <sys/ioctl.h>
 #include <sys/socket.h>
 #include <sys/un.h>
@@ -164,8 +162,8 @@
                         art::WellKnownClasses::java_lang_Thread_init,
                         thr_group.get(),
                         thr_name.get(),
-                        /*Priority*/ 0,
-                        /*Daemon*/ true);
+                        /*Priority=*/ 0,
+                        /*Daemon=*/ true);
 }
 
 struct CallbackData {
@@ -251,6 +249,8 @@
     runtime->StartThreadBirth();
   }
   ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
+  // Note: Using pthreads instead of std::thread to not abort when the thread cannot be
+  //       created (exception support required).
   pthread_t pthread;
   std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
   started_debugger_threads_ = true;
@@ -268,7 +268,7 @@
     runtime->EndThreadBirth();
     return;
   }
-  data.release();
+  data.release();  // NOLINT pthreads API.
 }
 
 static bool FlagsSet(int16_t data, int16_t flags) {
@@ -289,7 +289,7 @@
 
   // If the agent isn't loaded we might need to tell ddms code the connection is closed.
   if (!agent_loaded_ && notified_ddm_active_) {
-    NotifyDdms(/*active*/false);
+    NotifyDdms(/*active=*/false);
   }
 }
 
@@ -426,11 +426,11 @@
   cmsg->cmsg_type  = SCM_RIGHTS;
 
   // Duplicate the fds before sending them.
-  android::base::unique_fd read_fd(dup(adb_connection_socket_));
+  android::base::unique_fd read_fd(art::DupCloexec(adb_connection_socket_));
   CHECK_NE(read_fd.get(), -1) << "Failed to dup read_fd_: " << strerror(errno);
-  android::base::unique_fd write_fd(dup(adb_connection_socket_));
+  android::base::unique_fd write_fd(art::DupCloexec(adb_connection_socket_));
   CHECK_NE(write_fd.get(), -1) << "Failed to dup write_fd: " << strerror(errno);
-  android::base::unique_fd write_lock_fd(dup(adb_write_event_fd_));
+  android::base::unique_fd write_lock_fd(art::DupCloexec(adb_write_event_fd_));
   CHECK_NE(write_lock_fd.get(), -1) << "Failed to dup write_lock_fd: " << strerror(errno);
 
   dt_fd_forward::FdSet {
@@ -476,7 +476,6 @@
   int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
 
   if (rc <= 0) {
-    PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << control_sock_ << ")";
     return android::base::unique_fd(-1);
   } else {
     VLOG(jdwp) << "Fds have been received from ADB!";
@@ -514,11 +513,7 @@
     // the debuggable flag set.
     int ret = connect(sock, &control_addr_.controlAddrPlain, control_addr_len_);
     if (ret == 0) {
-      bool trusted = sock >= 0;
-#ifdef ART_TARGET_ANDROID
-      // Needed for socket_peer_is_trusted.
-      trusted = trusted && socket_peer_is_trusted(sock);
-#endif
+      bool trusted = sock >= 0 && art::SocketPeerIsTrusted(sock);
       if (!trusted) {
         LOG(ERROR) << "adb socket is not trusted. Aborting connection.";
         if (sock >= 0 && shutdown(sock, SHUT_RDWR)) {
@@ -605,7 +600,7 @@
         if (memcmp(kListenStartMessage, buf, sizeof(kListenStartMessage)) == 0) {
           agent_listening_ = true;
           if (adb_connection_socket_ != -1) {
-            SendAgentFds(/*require_handshake*/ !performed_handshake_);
+            SendAgentFds(/*require_handshake=*/ !performed_handshake_);
           }
         } else if (memcmp(kListenEndMessage, buf, sizeof(kListenEndMessage)) == 0) {
           agent_listening_ = false;
@@ -628,7 +623,6 @@
           android::base::unique_fd new_fd(ReadFdFromAdb());
           if (new_fd == -1) {
             // Something went wrong. We need to retry getting the control socket.
-            PLOG(ERROR) << "Something went wrong getting fds from adb. Retry!";
             control_sock_.reset();
             break;
           } else if (adb_connection_socket_ != -1) {
@@ -647,7 +641,7 @@
           VLOG(jdwp) << "Sending fds as soon as we received them.";
           // The agent was already loaded so this must be after a disconnection. Therefore have the
           // transport perform the handshake.
-          SendAgentFds(/*require_handshake*/ true);
+          SendAgentFds(/*require_handshake=*/ true);
         }
       } else if (FlagsSet(control_sock_poll.revents, POLLRDHUP)) {
         // The other end of the adb connection just dropped it.
@@ -663,7 +657,7 @@
         } else if (agent_listening_ && !sent_agent_fds_) {
           VLOG(jdwp) << "Sending agent fds again on data.";
           // Agent was already loaded so it can deal with the handshake.
-          SendAgentFds(/*require_handshake*/ true);
+          SendAgentFds(/*require_handshake=*/ true);
         }
       } else if (FlagsSet(adb_socket_poll.revents, POLLRDHUP)) {
         DCHECK(!agent_has_socket_);
@@ -763,7 +757,7 @@
   }
 
   if (!notified_ddm_active_) {
-    NotifyDdms(/*active*/ true);
+    NotifyDdms(/*active=*/ true);
   }
   uint32_t reply_type;
   std::vector<uint8_t> reply;
@@ -826,9 +820,9 @@
 void AdbConnectionState::AttachJdwpAgent(art::Thread* self) {
   art::Runtime* runtime = art::Runtime::Current();
   self->AssertNoPendingException();
-  runtime->AttachAgent(/* JNIEnv */ nullptr,
+  runtime->AttachAgent(/* env= */ nullptr,
                        MakeAgentArg(),
-                       /* classloader */ nullptr);
+                       /* class_loader= */ nullptr);
   if (self->IsExceptionPending()) {
     LOG(ERROR) << "Failed to load agent " << agent_name_;
     art::ScopedObjectAccess soa(self);
diff --git a/build/Android.bp b/build/Android.bp
index 62f71ff..46fb0c5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -7,6 +7,7 @@
         "blueprint-proptools",
         "soong",
         "soong-android",
+        "soong-apex",
         "soong-cc",
     ],
     srcs: [
@@ -18,16 +19,40 @@
 }
 
 art_clang_tidy_errors = [
-    // Protect scoped things like MutexLock.
-    "bugprone-unused-raii",
+    "android-cloexec-dup",
+    "android-cloexec-open",
+    "bugprone-argument-comment",
+    "bugprone-lambda-function-name",
+    "bugprone-unused-raii",  // Protect scoped things like MutexLock.
+    "bugprone-unused-return-value",
+    "bugprone-virtual-near-miss",
+    "modernize-use-bool-literals",
+    "modernize-use-nullptr",
+    "modernize-use-using",
+    "performance-faster-string-find",
     "performance-for-range-copy",
+    "performance-implicit-conversion-in-loop",
+    "performance-noexcept-move-constructor",
     "performance-unnecessary-copy-initialization",
     "performance-unnecessary-value-param",
     "misc-unused-using-decls",
 ]
 // Should be: strings.Join(art_clang_tidy_errors, ",").
-art_clang_tidy_errors_str = "bugprone-unused-raii"
+art_clang_tidy_errors_str = "android-cloexec-dup"
+        + ",android-cloexec-open"
+        + ",bugprone-argument-comment"
+        + ",bugprone-lambda-function-name"
+        + ",bugprone-unused-raii"
+        + ",bugprone-unused-return-value"
+        + ",bugprone-virtual-near-miss"
+        + ",modernize-redundant-void-arg"
+        + ",modernize-use-bool-literals"
+        + ",modernize-use-nullptr"
+        + ",modernize-use-using"
+        + ",performance-faster-string-find"
         + ",performance-for-range-copy"
+        + ",performance-implicit-conversion-in-loop"
+        + ",performance-noexcept-move-constructor"
         + ",performance-unnecessary-copy-initialization"
         + ",performance-unnecessary-value-param"
         + ",misc-unused-using-decls"
@@ -41,9 +66,11 @@
     // We have lots of C-style variadic functions, and are OK with them. JNI ensures
     // that working around this warning would be extra-painful.
     "-cert-dcl50-cpp",
-    // No exceptions.
-    "-misc-noexcept-move-constructor",
-    "-performance-noexcept-move-constructor",
+    // "Modernization" we don't agree with.
+    "-modernize-use-auto",
+    "-modernize-return-braced-init-list",
+    "-modernize-use-default-member-init",
+    "-modernize-pass-by-value",
 ]
 
 art_global_defaults {
@@ -88,6 +115,10 @@
         "-Wunreachable-code-break",
         "-Wunreachable-code-return",
 
+        // Disable warning for use of offsetof on non-standard layout type.
+        // We use it to implement OFFSETOF_MEMBER - see macros.h.
+        "-Wno-invalid-offsetof",
+
         // Enable thread annotations for std::mutex, etc.
         "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
     ],
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 316ce64..d024e77 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -93,6 +93,6 @@
 endif
 
 ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb
-ADB := $(ADB_EXECUTABLE)
+ADB ?= $(ADB_EXECUTABLE)
 
 endif # ART_ANDROID_COMMON_MK
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 3247e54..c321733 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -73,19 +73,34 @@
 HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
 TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
 
-# Jar files for core.art.
-HOST_CORE_DEX_LOCATIONS   := $(foreach jar,$(HOST_CORE_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+# Modules to compile for core.art.
+CORE_IMG_JARS := core-oj core-libart okhttp bouncycastle apache-xml
+HOST_CORE_IMG_JARS   := $(addsuffix -hostdex,$(CORE_IMG_JARS))
+TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
+HOST_CORE_IMG_DEX_LOCATIONS   := $(foreach jar,$(HOST_CORE_IMG_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
 ifeq ($(ART_TEST_ANDROID_ROOT),)
-TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
 else
-TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar)
 endif
+HOST_CORE_IMG_DEX_FILES   := $(foreach jar,$(HOST_CORE_IMG_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
-HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
-TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
+TEST_CORE_JARS := $(CORE_IMG_JARS) conscrypt
+HOST_TEST_CORE_JARS   := $(addsuffix -hostdex,$(TEST_CORE_JARS))
+TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
+HOST_CORE_DEX_LOCATIONS   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
+endif
+HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
-ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
-ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_TEST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
 
 ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
 ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid
@@ -108,7 +123,6 @@
     dexoptanalyzer \
     imgdiag \
     oatdump \
-    patchoat \
     profman \
 
 ART_CORE_EXECUTABLES := \
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c508fe7..12eae89 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -20,11 +20,8 @@
 include art/build/Android.common_path.mk
 
 # Directory used for temporary test files on the host.
-# Use a BSD checksum calculated from CWD and USER as one of the path
-# components for the test output. This should allow us to run tests from
-# multiple repositories at the same time.
-ART_TMPDIR := $(if $(TMPDIR),$(TMPDIR),/tmp)
-ART_HOST_TEST_DIR := $(ART_TMPDIR)/test-art-$(shell echo $$CWD-${USER} | sum | cut -d ' ' -f1)
+# TMPDIR is always provided by the build system as $OUT_DIR-unique temporary directory.
+ART_HOST_TEST_DIR := $(TMPDIR)/test-art
 
 # List of known broken tests that we won't attempt to execute. The test name must be the full
 # rule name such as test-art-host-oat-optimizing-HelloWorld64.
@@ -128,10 +125,11 @@
     LOCAL_DEX_PREOPT := false
     LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
     LOCAL_MODULE_TAGS := tests
-    LOCAL_JAVA_LIBRARIES := $(TARGET_CORE_JARS)
+    LOCAL_JAVA_LIBRARIES := $(TARGET_TEST_CORE_JARS)
     LOCAL_MODULE_PATH := $(3)
     LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_MIN_SDK_VERSION := 19
       LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
     endif
     include $(BUILD_JAVA_LIBRARY)
@@ -144,9 +142,10 @@
     LOCAL_NO_STANDARD_LIBRARIES := true
     LOCAL_DEX_PREOPT := false
     LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
-    LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
+    LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS)
     LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_MIN_SDK_VERSION := 19
       LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
     endif
     include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 20f20c9..21eee7a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -38,6 +38,7 @@
   GetMethodSignature \
   HiddenApi \
   HiddenApiSignatures \
+  HiddenApiStubs \
   ImageLayoutA \
   ImageLayoutB \
   IMTA \
@@ -61,6 +62,7 @@
   StaticLeafMethods \
   Statics \
   StaticsFromCode \
+  StringLiterals \
   Transaction \
   XandY
 
@@ -162,6 +164,16 @@
 $(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali
 	 $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
 
+ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifySoftFailDuringClinit/*.smali))
+ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+
+$(ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali
+	 $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
+
+$(ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali
+	 $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
+
 # Dex file dependencies for each gtest.
 ART_GTEST_art_dex_file_loader_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
 ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary MyClassNatives Nested VerifierDeps VerifierDepsMulti
@@ -174,12 +186,12 @@
 ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
 ART_GTEST_dexanalyze_test_DEX_DEPS := MultiDex
 ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed StringLiterals
 ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
 ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
-ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
+ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi HiddenApiStubs
 ART_GTEST_hidden_api_test_DEX_DEPS := HiddenApiSignatures
-ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
+ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods VerifySoftFailDuringClinit
 ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
 ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
 ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
@@ -187,15 +199,15 @@
 ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
 ART_GTEST_dexoptanalyzer_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
 ART_GTEST_image_space_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
-ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex MainUncompressed MultiDexUncompressed
+ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex MainUncompressed MultiDexUncompressed MainStripped Nested MultiDexModifiedSecondary
 ART_GTEST_oat_test_DEX_DEPS := Main
 ART_GTEST_oat_writer_test_DEX_DEPS := Main
 ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
-ART_GTEST_patchoat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
 ART_GTEST_proxy_test_DEX_DEPS := Interfaces
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
 ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
+ART_GTEST_profiling_info_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
@@ -205,6 +217,7 @@
 ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps VerifierDepsMulti MultiDex
 ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
 ART_GTEST_oatdump_app_test_DEX_DEPS := ProfileTestMultiDex
+ART_GTEST_oatdump_test_DEX_DEPS := ProfileTestMultiDex
 
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
@@ -214,14 +227,12 @@
   $(HOST_CORE_IMAGE_optimizing_64) \
   $(HOST_CORE_IMAGE_optimizing_32) \
   $(HOST_CORE_IMAGE_interpreter_64) \
-  $(HOST_CORE_IMAGE_interpreter_32) \
-  patchoatd-host
+  $(HOST_CORE_IMAGE_interpreter_32)
 ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_optimizing_64) \
   $(TARGET_CORE_IMAGE_optimizing_32) \
   $(TARGET_CORE_IMAGE_interpreter_64) \
-  $(TARGET_CORE_IMAGE_interpreter_32) \
-  patchoatd-target
+  $(TARGET_CORE_IMAGE_interpreter_32)
 
 ART_GTEST_oat_file_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -235,10 +246,10 @@
 
 ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
-  dexoptanalyzerd-host
+  $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd
 ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  dexoptanalyzerd-target
+  $(TARGET_OUT_EXECUTABLES)/dexoptanalyzerd
 
 ART_GTEST_image_space_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -247,68 +258,68 @@
 
 ART_GTEST_dex2oat_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
-  dex2oatd-host
+  $(HOST_OUT_EXECUTABLES)/dex2oatd
 ART_GTEST_dex2oat_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  dex2oatd-target
+  $(TARGET_OUT_EXECUTABLES)/dex2oatd
 
 ART_GTEST_dex2oat_image_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
-  dex2oatd-host
+  $(HOST_OUT_EXECUTABLES)/dex2oatd
 ART_GTEST_dex2oat_image_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  dex2oatd-target
+  $(TARGET_OUT_EXECUTABLES)/dex2oatd
 
 # TODO: document why this is needed.
 ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
 
 # The dexdiag test requires the dexdiag utility.
-ART_GTEST_dexdiag_test_HOST_DEPS := dexdiag-host
-ART_GTEST_dexdiag_test_TARGET_DEPS := dexdiag-target
+ART_GTEST_dexdiag_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/dexdiag
+ART_GTEST_dexdiag_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/dexdiag
 
 # The dexdump test requires an image and the dexdump utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexdump_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  dexdump2-host
+  $(HOST_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_dexdump_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  dexdump2-target
+  $(TARGET_OUT_EXECUTABLES)/dexdump2
 
 # The dexanalyze test requires an image and the dexanalyze utility.
 ART_GTEST_dexanalyze_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  dexanalyze-host
+  $(HOST_OUT_EXECUTABLES)/dexanalyze
 ART_GTEST_dexanalyze_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  dexanalyze-target
+  $(TARGET_OUT_EXECUTABLES)/dexanalyze
 
 # The dexlayout test requires an image and the dexlayout utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexlayout_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  dexlayoutd-host \
-  dexdump2-host
+  $(HOST_OUT_EXECUTABLES)/dexlayoutd \
+  $(HOST_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_dexlayout_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  dexlayoutd-target \
-  dexdump2-target
+  $(TARGET_OUT_EXECUTABLES)/dexlayoutd \
+  $(TARGET_OUT_EXECUTABLES)/dexdump2
 
 # The dexlist test requires an image and the dexlist utility.
 ART_GTEST_dexlist_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  dexlist-host
+  $(HOST_OUT_EXECUTABLES)/dexlist
 ART_GTEST_dexlist_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  dexlist-target
+  $(TARGET_OUT_EXECUTABLES)/dexlist
 
 # The imgdiag test has dependencies on core.oat since it needs to load it during the test.
 # For the host, also add the installed tool (in the base size, that should suffice). For the
@@ -316,59 +327,54 @@
 ART_GTEST_imgdiag_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  imgdiagd-host
+  $(HOST_OUT_EXECUTABLES)/imgdiagd
 ART_GTEST_imgdiag_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  imgdiagd-target
+  $(TARGET_OUT_EXECUTABLES)/imgdiagd
 
 # Dex analyze test requires dexanalyze.
 ART_GTEST_dexanalyze_test_HOST_DEPS := \
-  dexanalyze-host
+  $(HOST_OUT_EXECUTABLES)/dexanalyze
 ART_GTEST_dexanalyze_test_TARGET_DEPS := \
-  dexanalyze-target
+  $(TARGET_OUT_EXECUTABLES)/dexanalyze
 
 # Oatdump test requires an image and oatfile to dump.
 ART_GTEST_oatdump_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  oatdumpd-host \
-  oatdumpds-host \
-  dexdump2-host
+  $(HOST_OUT_EXECUTABLES)/oatdumpd \
+  $(HOST_OUT_EXECUTABLES)/oatdumpds \
+  $(HOST_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_oatdump_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  oatdumpd-target \
-  dexdump2-target
+  $(TARGET_OUT_EXECUTABLES)/oatdumpd \
+  $(TARGET_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
 ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
 ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) \
-  dex2oatd-host \
-  dex2oatds-host
+  $(HOST_OUT_EXECUTABLES)/dex2oatd \
+  $(HOST_OUT_EXECUTABLES)/dex2oatds
 ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) \
-  dex2oatd-target
-
-ART_GTEST_patchoat_test_HOST_DEPS := \
-  $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
-ART_GTEST_patchoat_test_TARGET_DEPS := \
-  $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+  $(TARGET_OUT_EXECUTABLES)/dex2oatd
 
 # Profile assistant tests requires profman utility.
-ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
-ART_GTEST_profile_assistant_test_TARGET_DEPS := profmand-target
+ART_GTEST_profile_assistant_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/profmand
+ART_GTEST_profile_assistant_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/profmand
 
 ART_GTEST_hiddenapi_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  hiddenapid-host
+  $(HOST_OUT_EXECUTABLES)/hiddenapid
 
 # The path for which all the source files are relative, not actually the current directory.
 LOCAL_PATH := art
 
 ART_TEST_MODULES := \
     art_cmdline_tests \
-    art_compiler_tests \
     art_compiler_host_tests \
+    art_compiler_tests \
     art_dex2oat_tests \
     art_dexanalyze_tests \
     art_dexdiag_tests \
@@ -379,13 +385,15 @@
     art_hiddenapi_tests \
     art_imgdiag_tests \
     art_libartbase_tests \
+    art_libartpalette_tests \
+    art_libdexfile_external_tests \
+    art_libdexfile_support_tests \
     art_libdexfile_tests \
     art_libprofile_tests \
     art_oatdump_tests \
-    art_patchoat_tests \
     art_profman_tests \
-    art_runtime_tests \
     art_runtime_compiler_tests \
+    art_runtime_tests \
     art_sigchain_tests \
 
 ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
@@ -412,12 +420,18 @@
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
 ART_TEST_HOST_GTEST_DEPENDENCIES :=
+ART_TEST_TARGET_GTEST_DEPENDENCIES :=
 
 ART_GTEST_TARGET_ANDROID_ROOT := '/system'
 ifneq ($(ART_TEST_ANDROID_ROOT),)
   ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
 endif
 
+ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT := '/apex/com.android.runtime'
+ifneq ($(ART_TEST_ANDROID_RUNTIME_ROOT),)
+  ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT := $(ART_TEST_ANDROID_RUNTIME_ROOT)
+endif
+
 # Define a make rule for a target device gtest.
 # $(1): gtest name - the name of the test we're building such as leb128_test.
 # $(2): path relative to $OUT to the test binary
@@ -440,15 +454,15 @@
 
   # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
   # to ensure files are pushed to the device.
-  TEST_ART_TARGET_SYNC_DEPS += \
+  gtest_deps := \
     $$(ART_GTEST_$(1)_TARGET_DEPS) \
     $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
     $$(gtest_exe) \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+    $$(foreach jar,$$(TARGET_TEST_CORE_JARS),$$(TARGET_OUT_JAVA_LIBRARIES)/$$(jar).jar)
+
+  ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
 
 $$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
 $$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
@@ -466,7 +480,9 @@
 	$(hide) $(ADB) shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
 	$(hide) $$(call ART_TEST_SKIP,$$@) && \
 	  ($(ADB) shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
-	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) $$(PRIVATE_TARGET_EXE) \
+	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+	       ANDROID_RUNTIME_ROOT=$(ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT) \
+	       $$(PRIVATE_TARGET_EXE) \
 	     && touch $$(PRIVATE_GTEST_WITNESS)" \
 	   && ($(ADB) pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
 	   || $$(call ART_TEST_FAILED,$$@))
@@ -481,6 +497,7 @@
   maybe_chroot_command :=
   maybe_art_test_chroot :=
   gtest_target_exe :=
+  gtest_deps :=
   gtest_exe :=
   gtest_rule :=
 endef  # define-art-gtest-rule-target
@@ -490,7 +507,10 @@
 # $(2): path relative to $OUT to the test binary
 # $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
 define define-art-gtest-rule-host
-  gtest_rule := test-art-host-gtest-$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX)
+  gtest_suffix := $(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX)
+  gtest_rule := test-art-host-gtest-$$(gtest_suffix)
+  gtest_output := $(call intermediates-dir-for,PACKAGING,art-host-gtest,HOST)/$$(gtest_suffix).xml
+  $$(call dist-for-goals,$$(gtest_rule),$$(gtest_output):gtest/$$(gtest_suffix))
   gtest_exe := $(OUT_DIR)/$(2)
   # Dependencies for all host gtests.
   gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
@@ -498,21 +518,24 @@
     $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
     $$(gtest_exe) \
     $$(ART_GTEST_$(1)_HOST_DEPS) \
-    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
-  ifneq (,$(DIST_DIR))
-    gtest_xml_output := --gtest_output=xml:$(DIST_DIR)/gtest/$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX).xml
-  else
-    gtest_xml_output :=
-  endif
+    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
+    $(HOST_OUT_EXECUTABLES)/timeout_dumper
 
   ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
 
 .PHONY: $$(gtest_rule)
+$$(gtest_rule): $$(gtest_output)
+
+# Re-run the tests, even if nothing changed. Until the build system has a dedicated "no cache"
+# option, claim to write a file that is never produced.
+$$(gtest_output): .KATI_IMPLICIT_OUTPUTS := $$(gtest_output)-nocache
+$$(gtest_output): NAME := $$(gtest_rule)
 ifeq (,$(SANITIZE_HOST))
-$$(gtest_rule): PRIVATE_XML_OUTPUT := $$(gtest_xml_output)
-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
-	$(hide) ($$(call ART_TEST_SKIP,$$@) && $$< $$(PRIVATE_XML_OUTPUT) && \
-		$$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@)
+$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
+	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
+		timeout --foreground -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+			$$< --gtest_output=xml:$$@ && \
+		$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
 else
 # Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
 #       build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so
@@ -521,14 +544,15 @@
 # (with the x86-64 ABI, as this allows symbolization of both x86 and x86-64). We don't do this in
 # general as it loses all the color output, and we have our own symbolization step when not running
 # under ASAN.
-$$(gtest_rule): PRIVATE_XML_OUTPUT := $$(gtest_xml_output)
-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
-	$(hide) ($$(call ART_TEST_SKIP,$$@) && set -o pipefail && \
-		ASAN_OPTIONS=detect_leaks=1 $$< $$(PRIVATE_XML_OUTPUT) 2>&1 | tee $$<.tmp.out >&2 && \
-		{ $$(call ART_TEST_PASSED,$$@) ; rm $$<.tmp.out ; }) || \
+$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
+	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
+		ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s -s SIGRTMIN+2 3600s \
+			$(HOST_OUT_EXECUTABLES)/timeout_dumper \
+				$$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+		{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
 		( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
 			{ echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
-		rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$@))
+		rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$(NAME)))
 endif
 
   ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
@@ -539,7 +563,9 @@
   # Clear locally defined variables.
   gtest_deps :=
   gtest_exe :=
+  gtest_output :=
   gtest_rule :=
+  gtest_suffix :=
 endef  # define-art-gtest-rule-host
 
 # Define the rules to build and run host and target gtests.
@@ -706,6 +732,7 @@
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
 ART_GTEST_TARGET_ANDROID_ROOT :=
+ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT :=
 ART_GTEST_class_linker_test_DEX_DEPS :=
 ART_GTEST_class_table_test_DEX_DEPS :=
 ART_GTEST_compiler_driver_test_DEX_DEPS :=
@@ -733,9 +760,6 @@
 ART_GTEST_dex2oat_image_test_HOST_DEPS :=
 ART_GTEST_dex2oat_image_test_TARGET_DEPS :=
 ART_GTEST_object_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_HOST_DEPS :=
-ART_GTEST_patchoat_test_TARGET_DEPS :=
 ART_GTEST_proxy_test_DEX_DEPS :=
 ART_GTEST_reflection_test_DEX_DEPS :=
 ART_GTEST_stub_test_DEX_DEPS :=
@@ -754,5 +778,8 @@
 ART_TEST_GTEST_VerifierDeps_SRC :=
 ART_TEST_HOST_GTEST_VerifierDeps_DEX :=
 ART_TEST_TARGET_GTEST_VerifierDeps_DEX :=
+ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC :=
+ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX :=
+ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX :=
 GTEST_DEX_DIRECTORIES :=
 LOCAL_PATH :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index c4ae593..2ad1143 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,9 +39,6 @@
 # Use dex2oat debug version for better error reporting
 # $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
 # $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# $(3): multi-image.
-# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
-# run-test --no-image
 define create-core-oat-host-rules
   core_compile_options :=
   core_image_name :=
@@ -66,25 +63,14 @@
     $$(error found $(1) expected interpreter, interp-ac, or optimizing)
   endif
 
-  # If $(3) is true, generate a multi-image.
-  ifeq ($(3),true)
-    core_multi_infix := -multi
-    core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar
-    core_multi_group := _multi
-  else
-    core_multi_infix :=
-    core_multi_param :=
-    core_multi_group :=
-  endif
-
-  core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_IMG_SUFFIX)
-  core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_OAT_SUFFIX)
+  core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
+  core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
 
   # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
   ifeq ($(2),)
-    HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
+    HOST_CORE_IMAGE_$(1)_64 := $$(core_image_name)
   else
-    HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
+    HOST_CORE_IMAGE_$(1)_32 := $$(core_image_name)
   endif
   HOST_CORE_IMG_OUTS += $$(core_image_name)
   HOST_CORE_OAT_OUTS += $$(core_oat_name)
@@ -92,21 +78,23 @@
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	@echo "host dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
 	$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
 	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
 	  $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
 	  --host --android-root=$$(HOST_OUT) \
-	  --generate-debug-info --generate-build-id --compile-pic \
+	  --generate-debug-info --generate-build-id \
 	  --runtime-arg -XX:SlowDebug=true \
-	  $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
+	  --no-inline-from=core-oj-hostdex.jar \
+	  $$(PRIVATE_CORE_COMPILE_OPTIONS)
 
 $$(core_oat_name): $$(core_image_name)
 
@@ -119,21 +107,17 @@
 endef  # create-core-oat-host-rules
 
 # $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
-# $(2): multi-image.
 define create-core-oat-host-rule-combination
-  $(call create-core-oat-host-rules,$(1),,$(2))
+  $(call create-core-oat-host-rules,$(1),)
 
   ifneq ($(HOST_PREFER_32_BIT),true)
-    $(call create-core-oat-host-rules,$(1),2ND_,$(2))
+    $(call create-core-oat-host-rules,$(1),2ND_)
   endif
 endef
 
-$(eval $(call create-core-oat-host-rule-combination,optimizing,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,false))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,true))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,true))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,true))
+$(eval $(call create-core-oat-host-rule-combination,optimizing))
+$(eval $(call create-core-oat-host-rule-combination,interpreter))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac))
 
 .PHONY: test-art-host-dex2oat-host
 test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
@@ -185,19 +169,21 @@
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
 	@echo "target dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
 	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
 	  --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
 	  --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
 	  --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
 	  --android-root=$$(PRODUCT_OUT)/system \
-	  --generate-debug-info --generate-build-id --compile-pic \
+	  --generate-debug-info --generate-build-id \
 	  --runtime-arg -XX:SlowDebug=true \
 	  $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
 
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
new file mode 100644
index 0000000..4a6637b
--- /dev/null
+++ b/build/apex/Android.bp
@@ -0,0 +1,232 @@
+// Android Runtime APEX module.
+
+// Modules listed in LOCAL_REQUIRED_MODULES for module art-runtime in art/Android.mk.
+// - Base requirements (binaries for which both 32- and 64-bit versions are built, if relevant).
+art_runtime_base_binaries_both = [
+    "dalvikvm",
+]
+// - Base requirements (binaries for which a 32-bit version is preferred).
+art_runtime_base_binaries_prefer32 = [
+    "dex2oat",
+    "dexoptanalyzer",
+    "profman",
+]
+// - Base requirements (libraries).
+art_runtime_base_native_shared_libs = [
+    "libart",
+    "libart-compiler",
+    "libopenjdkjvm",
+    "libopenjdkjvmti",
+    "libadbconnection",
+]
+bionic_native_shared_libs = [
+    "libc",
+    "libm",
+    "libdl",
+]
+bionic_binaries_both = [
+    "linker",
+]
+// - Debug variants (binaries for which a 32-bit version is preferred).
+art_runtime_debug_binaries_prefer32 = [
+    "dexoptanalyzerd",
+    "profmand",
+]
+art_runtime_debug_binaries_prefer32_device = [
+    "dex2oatd",
+]
+art_runtime_debug_binaries_both_host = [
+    "dex2oatd",
+]
+
+// - Debug variants (libraries).
+art_runtime_debug_native_shared_libs = [
+    "libartd",
+    "libartd-compiler",
+    "libopenjdkjvmd",
+    "libopenjdkjvmtid",
+    "libadbconnectiond",
+]
+libcore_debug_native_shared_libs = [
+    "libopenjdkd",
+]
+
+// Data files associated with bionic / managed core library APIs.
+art_runtime_data_file_prebuilts = [
+    "apex_tz_version",
+    "apex_tzdata",
+    "apex_tzlookup.xml",
+    "apex_icu.dat",
+]
+
+// Modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
+art_tools_common_binaries = [
+    "dexdiag",
+    "dexdump",
+    "dexlist",
+]
+
+// Device-only modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
+art_tools_device_only_binaries = [
+    // oatdump cannot link with host linux_bionic due to not using clang lld;
+    // TODO: Make it work with clang lld.
+    "oatdump",
+]
+
+// Host-only modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
+art_tools_host_only_binaries = [
+    // FIXME: Does not work as-is, because `ahat` is defined in tools/ahat/Android.mk
+    // (same issue as for `libart_fake` above).
+    //"ahat",
+    "hprof-conv",
+    // ...
+]
+
+art_tools_device_binaries = art_tools_common_binaries + art_tools_device_only_binaries
+art_tools_host_binaries = art_tools_common_binaries + art_tools_host_only_binaries
+
+// Libcore native libraries.
+libcore_native_shared_libs = [
+    "libjavacore",
+    "libopenjdk",
+    "libexpat",
+    "libz",
+    "libziparchive"
+]
+
+// Java libraries
+libcore_target_java_libs = [
+    "core-oj",
+    "core-libart",
+    "okhttp",
+    "bouncycastle",
+    "apache-xml",
+]
+
+apex_key {
+    name: "com.android.runtime.key",
+    public_key: "com.android.runtime.avbpubkey",
+    private_key: "com.android.runtime.pem",
+}
+
+prebuilt_etc {
+    name: "com.android.runtime.ld.config.txt",
+    src: "ld.config.txt",
+    filename: "ld.config.txt",
+    installable: false,
+}
+
+// TODO: Introduce `apex_defaults` to factor common parts of `apex`
+// module definitions below?
+
+// Release version of the Runtime APEX module (not containing debug
+// variants nor tools), included in user builds. Also used for
+// storage-constrained devices in userdebug and eng builds.
+apex {
+    name: "com.android.runtime.release",
+    compile_multilib: "both",
+    manifest: "manifest.json",
+    java_libs: libcore_target_java_libs,
+    native_shared_libs: art_runtime_base_native_shared_libs
+        + bionic_native_shared_libs
+        + libcore_native_shared_libs,
+    multilib: {
+        both: {
+            // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
+            // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
+            binaries: art_runtime_base_binaries_both
+                + bionic_binaries_both,
+        },
+        prefer32: {
+            binaries: art_runtime_base_binaries_prefer32,
+        },
+        first: {
+            binaries: [],
+        }
+    },
+    prebuilts: art_runtime_data_file_prebuilts
+        + ["com.android.runtime.ld.config.txt"],
+    key: "com.android.runtime.key",
+}
+
+// "Debug" version of the Runtime APEX module (containing both release and
+// debug variants, as well as additional tools), included in userdebug and
+// eng build.
+apex {
+    name: "com.android.runtime.debug",
+    compile_multilib: "both",
+    manifest: "manifest.json",
+    java_libs: libcore_target_java_libs,
+    native_shared_libs: art_runtime_base_native_shared_libs
+        + art_runtime_debug_native_shared_libs
+        + bionic_native_shared_libs
+        + libcore_native_shared_libs
+        + libcore_debug_native_shared_libs,
+    multilib: {
+        both: {
+            // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
+            // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
+            binaries: art_runtime_base_binaries_both
+                + bionic_binaries_both,
+        },
+        prefer32: {
+            binaries: art_runtime_base_binaries_prefer32
+                + art_runtime_debug_binaries_prefer32
+                + art_runtime_debug_binaries_prefer32_device,
+        },
+        first: {
+            binaries: art_tools_device_binaries,
+        }
+    },
+    prebuilts: art_runtime_data_file_prebuilts
+        + ["com.android.runtime.ld.config.txt"],
+    key: "com.android.runtime.key",
+}
+
+// TODO: Do this better. art_apex will disable host builds when
+// HOST_PREFER_32_BIT is set. We cannot simply use com.android.runtime.debug
+// because binaries have different multilib classes and 'multilib: {}' isn't
+// supported by target: { ... }.
+// See b/120617876 for more information.
+art_apex {
+    name: "com.android.runtime.host",
+    compile_multilib: "both",
+    payload_type: "zip",
+    host_supported: true,
+    device_supported: false,
+    manifest: "manifest.json",
+    java_libs: libcore_target_java_libs,
+    ignore_system_library_special_case: true,
+    native_shared_libs: art_runtime_base_native_shared_libs
+        + art_runtime_debug_native_shared_libs
+        + libcore_native_shared_libs
+        + libcore_debug_native_shared_libs,
+    multilib: {
+        both: {
+            // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
+            // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
+            binaries: art_runtime_base_binaries_both
+                + art_runtime_debug_binaries_both_host,
+        },
+        first: {
+            binaries: art_tools_host_binaries
+                + art_runtime_base_binaries_prefer32
+                + art_runtime_debug_binaries_prefer32,
+        }
+    },
+    key: "com.android.runtime.key",
+    target: {
+        darwin: {
+            enabled: false,
+        },
+        linux_bionic: {
+            enabled: true,
+            multilib: {
+                both: {
+                    native_shared_libs: bionic_native_shared_libs,
+                    binaries: bionic_binaries_both,
+                }
+            }
+        },
+    },
+}
diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py
new file mode 100755
index 0000000..1abc466
--- /dev/null
+++ b/build/apex/art_apex_test.py
@@ -0,0 +1,656 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import zipfile
+
+logging.basicConfig(format='%(message)s')
+
+class FSObject:
+  def __init__(self, name, is_dir, is_exec, is_symlink):
+    self.name = name
+    self.is_dir = is_dir
+    self.is_exec = is_exec
+    self.is_symlink = is_symlink
+  def __str__(self):
+    return '%s(dir=%r,exec=%r,symlink=%r)' % (self.name, self.is_dir, self.is_exec, self.is_symlink)
+
+class TargetApexProvider:
+  def __init__(self, apex, tmpdir, debugfs):
+    self._tmpdir = tmpdir
+    self._debugfs = debugfs
+    self._folder_cache = {}
+    self._payload = os.path.join(self._tmpdir, 'apex_payload.img')
+    # Extract payload to tmpdir.
+    zip = zipfile.ZipFile(apex)
+    zip.extract('apex_payload.img', tmpdir)
+
+  def __del__(self):
+    # Delete temps.
+    if os.path.exists(self._payload):
+      os.remove(self._payload)
+
+  def get(self, path):
+    dir, name = os.path.split(path)
+    if len(dir) == 0:
+      dir = '.'
+    map = self.read_dir(dir)
+    return map[name] if name in map else None
+
+  def read_dir(self, dir):
+    if dir in self._folder_cache:
+      return self._folder_cache[dir]
+    # Cannot use check_output here: it would interleave stderr noise with the output.
+    process = subprocess.Popen([self._debugfs, '-R', 'ls -l -p %s' % (dir), self._payload],
+                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                               universal_newlines=True)
+    stdout, stderr = process.communicate()
+    res = str(stdout)
+    map = {}
+    # Debugfs output looks like this:
+    #   debugfs 1.44.4 (18-Aug-2018)
+    #   /12/040755/0/2000/.//
+    #   /2/040755/1000/1000/..//
+    #   /13/100755/0/2000/dalvikvm32/28456/
+    #   /14/100755/0/2000/dexoptanalyzer/20396/
+    #   /15/100755/0/2000/linker/1152724/
+    #   /16/100755/0/2000/dex2oat/563508/
+    #   /17/100755/0/2000/linker64/1605424/
+    #   /18/100755/0/2000/profman/85304/
+    #   /19/100755/0/2000/dalvikvm64/28576/
+    #    |     |   |   |       |        |
+    #    |     |   |   #- gid  #- name  #- size
+    #    |     |   #- uid
+    #    |     #- type and permission bits
+    #    #- inode nr (?)
+    #
+    # Note: we could split on '/' alone to cope with file names containing newlines.
+    for line in res.split("\n"):
+      if not line:
+        continue
+      comps = line.split('/')
+      if len(comps) != 8:
+        logging.warn('Could not break and parse line \'%s\'', line)
+        continue
+      bits = comps[2]
+      name = comps[5]
+      if len(bits) != 6:
+        logging.warn('Dont understand bits \'%s\'', bits)
+        continue
+      is_dir = True if bits[1] == '4' else False
+      def is_exec_bit(ch):
+        return True if int(ch) & 1 == 1 else False
+      is_exec = is_exec_bit(bits[3]) and is_exec_bit(bits[4]) and is_exec_bit(bits[5])
+      is_symlink = True if bits[1] == '2' else False
+      map[name] = FSObject(name, is_dir, is_exec, is_symlink)
+    self._folder_cache[dir] = map
+    return map
+
+class HostApexProvider:
+  def __init__(self, apex, tmpdir):
+    self._tmpdir = tmpdir
+    self._folder_cache = {}
+    self._payload = os.path.join(self._tmpdir, 'apex_payload.zip')
+    # Extract payload to tmpdir.
+    zip = zipfile.ZipFile(apex)
+    zip.extract('apex_payload.zip', tmpdir)
+
+  def __del__(self):
+    # Delete temps.
+    if os.path.exists(self._payload):
+      os.remove(self._payload)
+
+  def get(self, path):
+    dir, name = os.path.split(path)
+    if len(dir) == 0:
+      dir = ''
+    map = self.read_dir(dir)
+    return map[name] if name in map else None
+
+  def read_dir(self, dir):
+    if dir in self._folder_cache:
+      return self._folder_cache[dir]
+    if not self._folder_cache:
+      self.parse_zip()
+    if dir in self._folder_cache:
+      return self._folder_cache[dir]
+    return {}
+
+  def parse_zip(self):
+    zip = zipfile.ZipFile(self._payload)
+    infos = zip.infolist()
+    for zipinfo in infos:
+      path = zipinfo.filename
+
+      # Assume no entry with an empty file name is stored.
+      assert path
+
+      def get_octal(val, index):
+        return (val >> (index * 3)) & 0x7;
+      def bits_is_exec(val):
+        # TODO: Enforce group/other, too?
+        return get_octal(val, 2) & 1 == 1
+
+      is_zipinfo = True
+      while path:
+        dir, base = os.path.split(path)
+        # TODO: If directories are stored, base will be empty.
+
+        if not dir in self._folder_cache:
+          self._folder_cache[dir] = {}
+        dir_map = self._folder_cache[dir]
+        if not base in dir_map:
+          if is_zipinfo:
+            bits = (zipinfo.external_attr >> 16) & 0xFFFF
+            is_dir = get_octal(bits, 4) == 4
+            is_symlink = get_octal(bits, 4) == 2
+            is_exec = bits_is_exec(bits)
+          else:
+            is_exec = False  # Seems we can't get this easily?
+            is_symlink = False
+            is_dir = True
+          dir_map[base] = FSObject(base, is_dir, is_exec, is_symlink)
+        is_zipinfo = False
+        path = dir
+
+# DO NOT USE DIRECTLY! This is an "abstract" base class.
+class Checker:
+  def __init__(self, provider):
+    self._provider = provider
+    self._errors = 0
+
+  def fail(self, msg, *args):
+    self._errors += 1
+    logging.error(msg, args)
+
+  def error_count(self):
+    return self._errors
+  def reset_errors(self):
+    self._errors = 0
+
+  def is_file(self, file):
+    fs_object = self._provider.get(file)
+    if fs_object is None:
+      return (False, 'Could not find %s')
+    if fs_object.is_dir:
+      return (False, '%s is a directory')
+    return (True, '')
+
+  def check_file(self, file):
+    chk = self.is_file(file)
+    if not chk[0]:
+      self.fail(chk[1], file)
+    return chk[0]
+  def check_no_file(self, file):
+    chk = self.is_file(file)
+    if chk[0]:
+      self.fail('File %s does exist', file)
+    return not chk[0]
+
+  def check_binary(self, file):
+    path = 'bin/%s' % (file)
+    if not self.check_file(path):
+      return False
+    if not self._provider.get(path).is_exec:
+      self.fail('%s is not executable', path)
+      return False
+    return True
+
+  def check_binary_symlink(self, file):
+    path = 'bin/%s' % (file)
+    fs_object = self._provider.get(path)
+    if fs_object is None:
+      self.fail('Could not find %s', path)
+      return False
+    if fs_object.is_dir:
+      self.fail('%s is a directory', path)
+      return False
+    if not fs_object.is_symlink:
+      self.fail('%s is not a symlink', path)
+      return False
+    return True
+
+  def check_single_library(self, file):
+    res1 = self.is_file('lib/%s' % (file))
+    res2 = self.is_file('lib64/%s' % (file))
+    if not res1[0] and not res2[0]:
+      self.fail('Library missing: %s', file)
+      return False
+    return True
+
+  def check_no_library(self, file):
+    res1 = self.is_file('lib/%s' % (file))
+    res2 = self.is_file('lib64/%s' % (file))
+    if res1[0] or res2[0]:
+      self.fail('Library exists: %s', file)
+      return False
+    return True
+
+  def check_java_library(self, file):
+    return self.check_file('javalib/%s' % (file))
+
+  # Just here for docs purposes, even if it isn't good Python style.
+
+  def check_library(self, file):
+    raise NotImplementedError
+
+  def check_first_library(self, file):
+    raise NotImplementedError
+
+  def check_multilib_binary(self, file):
+    raise NotImplementedError
+
+  def check_prefer32_binary(self, file):
+    raise NotImplementedError
+
+
+class Arch32Checker(Checker):
+  def __init__(self, provider):
+    super().__init__(provider)
+
+  def check_multilib_binary(self, file):
+    return all([self.check_binary('%s32' % (file)),
+                self.check_no_file('bin/%s64' % (file)),
+                self.check_binary_symlink(file)])
+
+  def check_library(self, file):
+    # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
+    # the precision of this test?
+    return all([self.check_file('lib/%s' % (file)), self.check_no_file('lib64/%s' % (file))])
+
+  def check_first_library(self, file):
+    return self.check_library(file)
+
+  def check_prefer32_binary(self, file):
+    return self.check_binary('%s32' % (file))
+
+
+class Arch64Checker(Checker):
+  def __init__(self, provider):
+    super().__init__(provider)
+
+  def check_multilib_binary(self, file):
+    return all([self.check_no_file('bin/%s32' % (file)),
+                self.check_binary('%s64' % (file)),
+                self.check_binary_symlink(file)])
+
+  def check_library(self, file):
+    # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
+    # the precision of this test?
+    return all([self.check_no_file('lib/%s' % (file)), self.check_file('lib64/%s' % (file))])
+
+  def check_first_library(self, file):
+    return self.check_library(file)
+
+  def check_prefer32_binary(self, file):
+    return self.check_binary('%s64' % (file))
+
+
+class MultilibChecker(Checker):
+  def __init__(self, provider):
+    super().__init__(provider)
+
+  def check_multilib_binary(self, file):
+    return all([self.check_binary('%s32' % (file)),
+                self.check_binary('%s64' % (file)),
+                self.check_binary_symlink(file)])
+
+  def check_library(self, file):
+    # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
+    # the precision of this test?
+    return all([self.check_file('lib/%s' % (file)), self.check_file('lib64/%s' % (file))])
+
+  def check_first_library(self, file):
+    return all([self.check_no_file('lib/%s' % (file)), self.check_file('lib64/%s' % (file))])
+
+  def check_prefer32_binary(self, file):
+    return self.check_binary('%s32' % (file))
+
+
+class ReleaseChecker:
+  def __init__(self, checker):
+    self._checker = checker
+  def __str__(self):
+    return 'Release Checker'
+
+  def run(self):
+    # Check that the mounted image contains an APEX manifest.
+    self._checker.check_file('apex_manifest.json')
+
+    # Check that the mounted image contains ART base binaries.
+    self._checker.check_multilib_binary('dalvikvm')
+    self._checker.check_binary('dex2oat')
+    self._checker.check_binary('dexoptanalyzer')
+    self._checker.check_binary('profman')
+
+    # oatdump is only in device APEXes due to build rules.
+    # TODO: Check for it when it is also built for host.
+    # self._checker.check_binary('oatdump')
+
+    # Check that the mounted image contains Android Runtime libraries.
+    self._checker.check_library('libart-compiler.so')
+    self._checker.check_library('libart-dexlayout.so')
+    self._checker.check_library('libart.so')
+    self._checker.check_library('libartbase.so')
+    self._checker.check_library('libartpalette.so')
+    self._checker.check_no_library('libartpalette-system.so')
+    self._checker.check_library('libdexfile.so')
+    self._checker.check_library('libopenjdkjvm.so')
+    self._checker.check_library('libopenjdkjvmti.so')
+    self._checker.check_library('libprofile.so')
+    # Check that the mounted image contains Android Core libraries.
+    # Note: host vs target libs are checked elsewhere.
+    self._checker.check_library('libjavacore.so')
+    self._checker.check_library('libopenjdk.so')
+    self._checker.check_library('libziparchive.so')
+    # Check that the mounted image contains additional required libraries.
+    self._checker.check_library('libadbconnection.so')
+
+    # TODO: Should we check for other libraries, such as:
+    #
+    #   libbacktrace.so
+    #   libbase.so
+    #   liblog.so
+    #   libsigchain.so
+    #   libtombstoned_client.so
+    #   libunwindstack.so
+    #   libvixl.so
+    #   libvixld.so
+    #   ...
+    #
+    # ?
+
+    self._checker.check_java_library('core-oj.jar')
+    self._checker.check_java_library('core-libart.jar')
+    self._checker.check_java_library('okhttp.jar')
+    self._checker.check_java_library('bouncycastle.jar')
+    self._checker.check_java_library('apache-xml.jar')
+
+class ReleaseTargetChecker:
+  def __init__(self, checker):
+    self._checker = checker
+  def __str__(self):
+    return 'Release (Target) Checker'
+
+  def run(self):
+    # Check that the mounted image contains Android Core libraries.
+    self._checker.check_library('libexpat.so')
+    self._checker.check_library('libz.so')
+
+class ReleaseHostChecker:
+  def __init__(self, checker):
+    self._checker = checker;
+  def __str__(self):
+    return 'Release (Host) Checker'
+
+  def run(self):
+    # Check that the mounted image contains Android Core libraries.
+    self._checker.check_library('libexpat-host.so')
+    self._checker.check_library('libz-host.so')
+
+class DebugChecker:
+  def __init__(self, checker):
+    self._checker = checker
+  def __str__(self):
+    return 'Debug Checker'
+
+  def run(self):
+    # Check that the mounted image contains ART tools binaries.
+    self._checker.check_binary('dexdiag')
+    self._checker.check_binary('dexdump')
+    self._checker.check_binary('dexlist')
+
+    # Check that the mounted image contains ART debug binaries.
+    self._checker.check_binary('dex2oatd')
+    self._checker.check_binary('dexoptanalyzerd')
+    self._checker.check_binary('profmand')
+
+    # Check that the mounted image contains Android Runtime debug libraries.
+    self._checker.check_library('libartbased.so')
+    self._checker.check_library('libartd-compiler.so')
+    self._checker.check_library('libartd-dexlayout.so')
+    self._checker.check_library('libartd.so')
+    self._checker.check_library('libdexfiled.so')
+    self._checker.check_library('libopenjdkjvmd.so')
+    self._checker.check_library('libopenjdkjvmtid.so')
+    self._checker.check_library('libprofiled.so')
+    # Check that the mounted image contains Android Core debug libraries.
+    self._checker.check_library('libopenjdkd.so')
+    # Check that the mounted image contains additional required debug libraries.
+    self._checker.check_library('libadbconnectiond.so')
+
+class DebugTargetChecker:
+  def __init__(self, checker):
+    self._checker = checker
+  def __str__(self):
+    return 'Debug (Target) Checker'
+
+  def run(self):
+    # Check for files pulled in from debug target-only oatdump.
+    self._checker.check_binary('oatdump')
+    self._checker.check_first_library('libart-disassembler.so')
+
+def print_list(provider):
+    def print_list_impl(provider, path):
+      map = provider.read_dir(path)
+      if map is None:
+        return
+      map = dict(map)
+      if '.' in map:
+        del map['.']
+      if '..' in map:
+        del map['..']
+      for (_, val) in sorted(map.items()):
+        new_path = os.path.join(path, val.name)
+        print(new_path)
+        if val.is_dir:
+          print_list_impl(provider, new_path)
+    print_list_impl(provider, '')
+
+def print_tree(provider, title):
+    def get_vertical(has_next_list):
+      str = ''
+      for v in has_next_list:
+        str += '%s   ' % ('│' if v else ' ')
+      return str
+    def get_last_vertical(last):
+      return '└── ' if last else '├── ';
+    def print_tree_impl(provider, path, has_next_list):
+      map = provider.read_dir(path)
+      if map is None:
+        return
+      map = dict(map)
+      if '.' in map:
+        del map['.']
+      if '..' in map:
+        del map['..']
+      key_list = list(sorted(map.keys()))
+      for i in range(0, len(key_list)):
+        val = map[key_list[i]]
+        prev = get_vertical(has_next_list)
+        last = get_last_vertical(i == len(key_list) - 1)
+        print('%s%s%s' % (prev, last, val.name))
+        if val.is_dir:
+          has_next_list.append(i < len(key_list) - 1)
+          print_tree_impl(provider, os.path.join(path, val.name), has_next_list)
+          has_next_list.pop()
+    print('%s' % (title))
+    print_tree_impl(provider, '', [])
+
+# Note: do not sys.exit early, for __del__ cleanup.
+def artApexTestMain(args):
+  if args.tree and args.debug:
+    logging.error("Both of --tree and --debug set")
+    return 1
+  if args.list and args.debug:
+    logging.error("Both of --list and --debug set")
+    return 1
+  if args.list and args.tree:
+    logging.error("Both of --list and --tree set")
+    return 1
+  if not args.tmpdir:
+    logging.error("Need a tmpdir.")
+    return 1
+  if not args.host and not args.debugfs:
+    logging.error("Need debugfs.")
+    return 1
+  if args.bitness not in ['32', '64', 'multilib', 'auto']:
+    logging.error('--bitness needs to be one of 32|64|multilib|auto')
+
+  try:
+    if args.host:
+      apex_provider = HostApexProvider(args.apex, args.tmpdir)
+    else:
+      apex_provider = TargetApexProvider(args.apex, args.tmpdir, args.debugfs)
+  except Exception as e:
+    logging.error('Failed to create provider: %s', e)
+    return 1
+
+  if args.tree:
+    print_tree(apex_provider, args.apex)
+    return 0
+  if args.list:
+    print_list(apex_provider)
+    return 0
+
+  checkers = []
+  if args.bitness == 'auto':
+    logging.warn('--bitness=auto, trying to autodetect. This may be incorrect!')
+    has_32 = apex_provider.get('lib') is not None
+    has_64 = apex_provider.get('lib64') is not None
+    if has_32 and has_64:
+      logging.warn('  Detected multilib')
+      args.bitness = 'multilib'
+    elif has_32:
+      logging.warn('  Detected 32-only')
+      args.bitness = '32'
+    elif has_64:
+      logging.warn('  Detected 64-only')
+      args.bitness = '64'
+    else:
+      logging.error('  Could not detect bitness, neither lib nor lib64 contained.')
+      print('%s' % (apex_provider._folder_cache))
+      return 1
+
+  if args.bitness == '32':
+    base_checker = Arch32Checker(apex_provider)
+  elif args.bitness == '64':
+    base_checker = Arch64Checker(apex_provider)
+  else:
+    assert args.bitness == 'multilib'
+    base_checker = MultilibChecker(apex_provider)
+
+  checkers.append(ReleaseChecker(base_checker))
+  if args.host:
+    checkers.append(ReleaseHostChecker(base_checker))
+  else:
+    checkers.append(ReleaseTargetChecker(base_checker))
+  if args.debug:
+    checkers.append(DebugChecker(base_checker))
+  if args.debug and not args.host:
+    checkers.append(DebugTargetChecker(base_checker))
+
+  failed = False
+  for checker in checkers:
+    logging.info('%s...', checker)
+    checker.run()
+    if base_checker.error_count() > 0:
+      logging.error('%s FAILED', checker)
+      failed = True
+    else:
+      logging.info('%s SUCCEEDED', checker)
+    base_checker.reset_errors()
+
+  return 1 if failed else 0
+
+def artApexTestDefault(parser):
+  if not 'ANDROID_PRODUCT_OUT' in os.environ:
+    logging.error('No-argument use requires ANDROID_PRODUCT_OUT')
+    sys.exit(1)
+  product_out = os.environ['ANDROID_PRODUCT_OUT']
+  if not 'ANDROID_HOST_OUT' in os.environ:
+    logging.error('No-argument use requires ANDROID_HOST_OUT')
+    sys.exit(1)
+  host_out = os.environ['ANDROID_HOST_OUT']
+
+  args = parser.parse_args(['dummy'])  # For consistency.
+  args.debugfs = '%s/bin/debugfs' % (host_out)
+  args.tmpdir = '.'
+  args.tree = False
+  args.list = False
+  args.bitness = 'auto'
+  failed = False
+
+  if not os.path.exists(args.debugfs):
+    logging.error("Cannot find debugfs (default path %s). Please build it, e.g., m debugfs",
+                  args.debugfs)
+    sys.exit(1)
+
+  # TODO: Add host support
+  configs= [
+    {'name': 'com.android.runtime.release', 'debug': False, 'host': False},
+    {'name': 'com.android.runtime.debug', 'debug': True, 'host': False},
+  ]
+
+  for config in configs:
+    logging.info(config['name'])
+    # TODO: Host will need different path.
+    args.apex = '%s/system/apex/%s.apex' % (product_out, config['name'])
+    if not os.path.exists(args.apex):
+      failed = True
+      logging.error("Cannot find APEX %s. Please build it first.", args.apex)
+      continue
+    args.debug = config['debug']
+    args.host = config['host']
+    exit_code = artApexTestMain(args)
+    if exit_code != 0:
+      failed = True
+
+  if failed:
+    sys.exit(1)
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser(description='Check integrity of a Runtime APEX.')
+
+  parser.add_argument('apex', help='apex file input')
+
+  parser.add_argument('--host', help='Check as host apex', action='store_true')
+
+  parser.add_argument('--debug', help='Check as debug apex', action='store_true')
+
+  parser.add_argument('--list', help='List all files', action='store_true')
+  parser.add_argument('--tree', help='Print directory tree', action='store_true')
+
+  parser.add_argument('--tmpdir', help='Directory for temp files')
+  parser.add_argument('--debugfs', help='Path to debugfs')
+
+  parser.add_argument('--bitness', help='Bitness to check, 32|64|multilib|auto', default='auto')
+
+  if len(sys.argv) == 1:
+    artApexTestDefault(parser)
+  else:
+    args = parser.parse_args()
+
+    if args is None:
+      sys.exit(1)
+
+    exit_code = artApexTestMain(args)
+    sys.exit(exit_code)
diff --git a/build/apex/com.android.runtime.avbpubkey b/build/apex/com.android.runtime.avbpubkey
new file mode 100644
index 0000000..b0ffc9b
--- /dev/null
+++ b/build/apex/com.android.runtime.avbpubkey
Binary files differ
diff --git a/build/apex/com.android.runtime.pem b/build/apex/com.android.runtime.pem
new file mode 100644
index 0000000..4c7ce4b
--- /dev/null
+++ b/build/apex/com.android.runtime.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAx/VRn+TOZ4Hah9tHkb2Jvw7aQcqurnNamGa1Ta3x09HEV45s
+KTAqeTIPpbagx7aj6LNimiqoJaClV8pFhYfC6y7GLnXBk6PRGb2kPmrWy2aQFRkh
+Z2LBQwu15Rqr3SVbzMPbC5qoXOpUopzZnjRwniR32fnpJedUwpSMxaJwXDxfgBku
+Wm3EHBuTQ33L/z3VGwbVp1Rh/QhI/RfcwT1u6o9XUl0LqiQu/8DLTLNmsjAsQkbA
+8O1ToIBu2l71HaMqXOKRKtVuAYPyAMml5hXSH0dva2ebKkUM8E7FhcsK71QJ5iCs
+L2uC+OmG9f4aSqiIJld7/nDU7nrkiUxjs5bGp2PIxHmuv725XdBMJ+IuqnZGGkA8
+4tF14bY1YX4Tq6ojzATBnbFcZEAU4epJvX13Wu11ktYndMbppUtnVCdhO2vnA/tP
+MpBSOCHMk2Y2Q96LcIN9ANJrcrkrSIGBTQdvCRJ9LtofXlk/ytGIUceCzRtFhmwL
+zWFwJVT7cQX04Pw/EX/zrZyOq7SUYCGDsBwZsUtlZ30Cx92dergtKlZyJFqKnwMv
+hajr55mqRCv4M1dumCgiQaml29ftXWE6wQxqI0jQN8seSVz/HUazjSb3QFXgX16z
+w4VkxqSKu4subqesMcxiyev5McGXUUthkRGDSSFbJwX0L5jNEPyYPUu2nJ0CAwEA
+AQKCAgEAxGKuDin8hjBE3tWAjyTmWp1Nwvw7X96vhaqqOmayceU9vviERlel/24p
+bAnYEw3QIcW8+8kVaA9FFNn2OdVCnRVNU2gX/NcRkQRugVcRKqfKrs4FvrKBOUYR
+Gbh+Py5n4M4jHlyBKvCCu0rteLHsQYVzqMQINk/jMVAQijKlxBEPgpI4slvIFgsH
+MWwlpMOnv2mRAUyhCJDQjrKW/7tEal7p1lzIDgyHlGxXvzcbj7o8XcN7z6RnU+WP
++iz09GzCOIPVK4p/BkH+tsNVioq32jygs44IGRXERWg4GtV2IeQZ7Mj+E3y2H53M
+DWHJlLW9MlsNzrImjypntmkuKr3Uz+ipg/oXD1tv/XJkBkJUsWSQHzGw4DfxRfq7
+eJ9LlIMzrQn8ZJAJTSsckmGuakSyD9amSbtn1kl+fEZge9SvAoZVZelwB1qfGgyS
+qQVAN9x1SP0DCeX33syxT2rxZVOUZgRT8yt01jVcIU3dD66McYRjiUY6uG1aZ3Xb
+p8TD3xKMqPPc7dIN3xcN58S+sIejydmm636LE1ykA0dYPczqxDfIfhbqE/42B5LZ
+grjZdXN1pd97IeEFQLd+DfP8iq80D6k6ojmXxANXCz1ilJXyr2defWUwSSiwsD5v
+HacFeOQ6+KQyYrkdhbpa5XlO6luDIZmxN3B6rx7kqg6UZW9EzYkCggEBAPDNOZ6X
+TIKBIdV5zkr2rvjor/WvPnoWUOBCmxh8zaAZhReE3RitNjtEVz/ns/x8vyyMRdPA
+JDszBrawYlMjoEW9NQe6BYKfwKRl+QzsWEIsdBfYB70vmguwS/VdMbVaU/jWFbS+
+IFB9F88iEJiI8IeH+IomGXinCDxAkXqJztFZRzonaX5+QHC4s8geRyYn9gs6SxHy
+MqOOzifnebZg4dXLCL7jMDGsEa/Fu188FFae407BsOEt4bday37n91xysdilkPg3
+b3mIB3EFrsbnqXypayM/QUfQ/d48Xfa/l+74i1Qpd1MIeHYNndLDxtRes9Oc7Rnv
+oCdI9Lkc+KuR8AcCggEBANSUKb2jz0VfSZSZsgL5gj34Kcfoe5peQvP+pUuJmZhy
+8QkGUUNtq2l86PMJSfJknbUhVLPe0wzT8NG08HTMkVHlw7lve//avugfpnrR7hsZ
+BTWDjW44x+Y8Q8dwTUl3nYtEYn81ycUzmFBmYDEVXjlvyMlXe0HLEz90v2wwtZlp
+IxEXgEgMnLj36JH5iKh7YuLf9c8laok7Jed6u+h5nlXUcbfaSVN6U3K+6UdQKUrr
+TaSQLw2pEsZ6CEt0yGJDkoID7mfTfc1/olNWWGUz0RE9G5eqQYjgEoAiTBZZeSlm
+3Kaun8gydN7wwJ6AjPCPFOwtgV7dUoN4YbWgfsAgnTsCggEBALHOWCWKgqw6vcjr
+0C/6Ruj0qDk51WBA6icuB2flf9AgB+595OQ7wjexFtvRM03UrzUtvsHEtvwfiW2M
+gI3zWH0mYOn7qeXyIEVEJsosGl+Cg5a3pb9ETvMiknPzBKlssWSkcBKt8R59v/7q
+oGaBd1ocRKF90IEOlT4oT0O0Tkq3Kaj/QR5uCxcwy0+RS+gYyc0wlg4CUPIEmKVO
+fsj0cM10xlhtWUDUVZr83oZLzpjHagDVdM5RGsJRAMIMdtKEvl3Co3ElPeL3VsdV
+8uBcXwH1925nXsSwxUQ8PwXcI0wJqpfSppFhR9Gj7E2c0kwuQYqX7VuhXRik/k9R
+3SyS7jECggEBAL7q7m4GL8IjKSdPvgNT6TgUqBmFX3UtkT4nhnbH9u1m1bmANf20
+Ak20RFb6EbKj0Mv7SmJdDfkoY9FDiu2rSBxgmZ7yVFBeOjSpMFCAODOYDgiYxK2o
+S0go+cqlvpPr3M9WNIwBV9xHUVVsDJookb5N+etyKR3W78t+4+ib+oz0Uu0nySts
+QFkTNYncrXJ7lj0iXVaUSRFE0O8LWLYafCyjpxoy7sYNR+L3OPW2Nc+2cr4ITGod
+XeJpeQejs9Ak1fD07OnMlOC576SfGLaTigHMevqEi2UNsS/pHaK46stXOXZtwM0B
+G9uaJ7RyyaHHL0hKOjVj2pZ+yGph4VRWNj8CggEAQlp/QytXhKZtM9OqRy/th+XO
+ctoVEl8codUydwwxMCsKqGYiCXazeyDZQimOjaxSNFXo8hWuf694WGsQJ6TyXCEs
+0JAJbCooI+DI9Z4LbqHtLDg1/S6a1558Nyrc6j6amevvbB5xKS2mKhGl5JgzBsJO
+H3yE0DD1DHaSM3V1rTfdyGoaxNESw45bnpxkAooMrw62OIO/9f502FLUx+sq+koT
+aajw4qQ6rBll3/+PKCORKzncHDMkIbeD6c6sX+ONUz7vxg3pV4eZG7NClWvA24Td
+1sANz3m6EmqG41lBzeUGConWxWRwkEXJgbxmPwMariRKR8aNVOlDVVbDp9Hhxg==
+-----END RSA PRIVATE KEY-----
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
new file mode 100644
index 0000000..9bf2ae5
--- /dev/null
+++ b/build/apex/ld.config.txt
@@ -0,0 +1,81 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Bionic loader config file for the Runtime APEX.
+#
+# There are no versioned APEX paths here - this APEX module does not support
+# having several versions mounted.
+
+dir.runtime = /apex/com.android.runtime/bin/
+
+[runtime]
+additional.namespaces = platform,conscrypt,runtime
+
+# Keep in sync with the runtime namespace in /system/etc/ld.config.txt.
+namespace.default.isolated = true
+# Visible because some libraries are dlopen'ed, e.g. libopenjdk is dlopen'ed by
+# libart.
+namespace.default.visible = true
+namespace.default.search.paths = /apex/com.android.runtime/${LIB}
+namespace.default.asan.search.paths = /apex/com.android.runtime/${LIB}
+# odex files are in /system/framework. dalvikvm has to be able to dlopen the
+# files for CTS.
+namespace.default.permitted.paths = /system/framework
+namespace.default.links = platform
+# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
+namespace.default.link.platform.allow_all_shared_libs = true
+
+# Keep in sync with the default namespace in /system/etc/ld.config.txt.
+namespace.platform.isolated = true
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.asan.search.paths = /data/asan/system/${LIB}
+namespace.platform.links = default
+namespace.platform.link.default.shared_libs  = libart.so:libartd.so
+namespace.platform.link.default.shared_libs += libnativebridge.so
+namespace.platform.link.default.shared_libs += libnativehelper.so
+namespace.platform.link.default.shared_libs += libnativeloader.so
+# /system/lib/libc.so, etc are symlinks to /bionic/lib/libc.so, etc.
+# Add /bionic/lib to the permitted paths because linker uses realpath(3)
+# to check the accessibility of the lib. We could add this to search.paths
+# instead but that makes the resolution of bionic libs be dependent on
+# the order of /system/lib and /bionic/lib in search.paths. If /bionic/lib
+# is after /system/lib, then /bionic/lib is never tried because libc.so
+# is always found in /system/lib but fails to pass the accessibility test
+# because of its realpath.  It's better to not depend on the ordering if
+# possible.
+namespace.platform.permitted.paths = /bionic/${LIB}
+namespace.platform.asan.permitted.paths = /bionic/${LIB}
+
+# Note that we don't need to link the default namespace with conscrypt:
+# the runtime Java code and binaries do not explicitly load native libraries
+# from it.
+
+###############################################################################
+# "conscrypt" APEX namespace
+#
+# This namespace is for libraries within the conscrypt APEX.
+###############################################################################
+
+# Keep in sync with conscrypt namespace in /system/etc/ld.config.txt.
+namespace.conscrypt.isolated = true
+namespace.conscrypt.visible = true
+
+namespace.conscrypt.search.paths = /apex/com.android.conscrypt/${LIB}
+namespace.conscrypt.asan.search.paths = /apex/com.android.conscrypt/${LIB}
+namespace.conscrypt.links = runtime,platform
+namespace.conscrypt.link.runtime.shared_libs   = libjavacore.so
+namespace.conscrypt.link.platform.shared_libs  = libc.so
+namespace.conscrypt.link.platform.shared_libs += libm.so
+namespace.conscrypt.link.platform.shared_libs += libdl.so
+
+###############################################################################
+# "runtime" APEX namespace
+#
+# This namespace is an alias for the default namespace.
+###############################################################################
+namespace.runtime.isolated = true
+namespace.runtime.visible = true
+namespace.runtime.links = default
+namespace.runtime.link.default.allow_all_shared_libs = true
+namespace.runtime.links += platform
+# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
+namespace.runtime.link.platform.allow_all_shared_libs = true
diff --git a/build/apex/manifest.json b/build/apex/manifest.json
new file mode 100644
index 0000000..20a8314
--- /dev/null
+++ b/build/apex/manifest.json
@@ -0,0 +1,4 @@
+{
+  "name": "com.android.runtime",
+  "version": 1
+}
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
new file mode 100755
index 0000000..95c1de9
--- /dev/null
+++ b/build/apex/runtests.sh
@@ -0,0 +1,248 @@
+#!/bin/bash
+
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Run Android Runtime APEX tests.
+
+SCRIPT_DIR=$(dirname $0)
+
+# Status of whole test script.
+exit_status=0
+# Status of current test suite.
+test_status=0
+
+function say {
+  echo "$0: $*"
+}
+
+function die {
+  echo "$0: $*"
+  exit 1
+}
+
+[[ -n "$ANDROID_PRODUCT_OUT" ]] \
+  || die "You need to source and lunch before you can use this script."
+
+[[ -n "$ANDROID_HOST_OUT" ]] \
+  || die "You need to source and lunch before you can use this script."
+
+if [ ! -e "$ANDROID_HOST_OUT/bin/debugfs" ] ; then
+  say "Could not find debugfs, building now."
+  make debugfs-host || die "Cannot build debugfs"
+fi
+
+# Fail early.
+set -e
+
+build_apex_p=true
+list_image_files_p=false
+print_image_tree_p=false
+
+function usage {
+  cat <<EOF
+Usage: $0 [OPTION]
+Build (optional) and run tests on Android Runtime APEX package (on host).
+
+  -s, --skip-build    skip the build step
+  -l, --list-files    list the contents of the ext4 image using `find`
+  -t, --print-tree    list the contents of the ext4 image using `tree`
+  -h, --help          display this help and exit
+
+EOF
+  exit
+}
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    (-s|--skip-build) build_apex_p=false;;
+    (-l|--list-files) list_image_files_p=true;;
+    (-t|--print-tree) print_image_tree_p=true;;
+    (-h|--help) usage;;
+    (*) die "Unknown option: '$1'
+Try '$0 --help' for more information.";;
+  esac
+  shift
+done
+
+# build_apex APEX_MODULE
+# ----------------------
+# Build APEX package APEX_MODULE.
+function build_apex {
+  if $build_apex_p; then
+    local apex_module=$1
+    say "Building package $apex_module" && make "$apex_module" || die "Cannot build $apex_module"
+  fi
+}
+
+# maybe_list_apex_contents_apex APEX TMPDIR [other]
+function maybe_list_apex_contents_apex {
+  local apex=$1
+  local tmpdir=$2
+  shift 2
+
+  # List the contents of the apex in list form.
+  if $list_image_files_p; then
+    say "Listing image files"
+    $SCRIPT_DIR/art_apex_test.py --list --tmpdir "$tmpdir" $@ $apex
+  fi
+
+  # List the contents of the apex in tree form.
+  if $print_image_tree_p; then
+    say "Printing image tree"
+    $SCRIPT_DIR/art_apex_test.py --tree --tmpdir "$tmpdir" $@ $apex
+  fi
+}
+
+function fail_check {
+  echo "$0: FAILED: $*"
+  test_status=1
+  exit_status=1
+}
+
+# Testing target (device) APEX packages.
+# ======================================
+
+# Clean-up.
+function cleanup_target {
+  rm -rf "$work_dir"
+}
+
+# Garbage collection.
+function finish_target {
+  # Don't fail early during cleanup.
+  set +e
+  cleanup_target
+}
+
+# Testing release APEX package (com.android.runtime.release).
+# -----------------------------------------------------------
+
+apex_module="com.android.runtime.release"
+test_status=0
+
+say "Processing APEX package $apex_module"
+
+work_dir=$(mktemp -d)
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+apex_path="$ANDROID_PRODUCT_OUT/system/apex/${apex_module}.apex"
+
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents_apex $apex_path $work_dir --debugfs $ANDROID_HOST_OUT/bin/debugfs
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+$SCRIPT_DIR/art_apex_test.py \
+  --tmpdir $work_dir \
+  --debugfs $ANDROID_HOST_OUT/bin/debugfs \
+  $apex_path \
+    || fail_check "Release checks failed"
+
+# Clean up.
+trap - EXIT
+cleanup_target
+
+[[ "$test_status" = 0 ]] && say "$apex_module tests passed"
+echo
+
+# Testing debug APEX package (com.android.runtime.debug).
+# -------------------------------------------------------
+
+apex_module="com.android.runtime.debug"
+test_status=0
+
+say "Processing APEX package $apex_module"
+
+work_dir=$(mktemp -d)
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+apex_path="$ANDROID_PRODUCT_OUT/system/apex/${apex_module}.apex"
+
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents_apex $apex_path $work_dir --debugfs $ANDROID_HOST_OUT/bin/debugfs
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+$SCRIPT_DIR/art_apex_test.py \
+  --tmpdir $work_dir \
+  --debugfs $ANDROID_HOST_OUT/bin/debugfs \
+  --debug \
+  $apex_path \
+    || fail_check "Debug checks failed"
+
+# Clean up.
+trap - EXIT
+cleanup_target
+
+[[ "$test_status" = 0 ]] && say "$apex_module tests passed"
+echo
+
+
+# Testing host APEX package (com.android.runtime.host).
+# =====================================================
+
+# Clean-up.
+function cleanup_host {
+  rm -rf "$work_dir"
+}
+
+# Garbage collection.
+function finish_host {
+  # Don't fail early during cleanup.
+  set +e
+  cleanup_host
+}
+
+apex_module="com.android.runtime.host"
+test_status=0
+
+say "Processing APEX package $apex_module"
+
+work_dir=$(mktemp -d)
+
+trap finish_host EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+apex_path="$ANDROID_HOST_OUT/apex/${apex_module}.zipapex"
+
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents_apex $apex_path $work_dir --host
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+$SCRIPT_DIR/art_apex_test.py \
+  --tmpdir $work_dir \
+  --host \
+  --debug \
+  $apex_path \
+    || fail_check "Debug checks failed"
+
+# Clean up.
+trap - EXIT
+cleanup_host
+
+[[ "$test_status" = 0 ]] && say "$apex_module tests passed"
+
+[[ "$exit_status" = 0 ]] && say "All Android Runtime APEX tests passed"
+
+exit $exit_status
diff --git a/build/art.go b/build/art.go
index 61b1a4e..5236e31 100644
--- a/build/art.go
+++ b/build/art.go
@@ -16,8 +16,10 @@
 
 import (
 	"android/soong/android"
+	"android/soong/apex"
 	"android/soong/cc"
 	"fmt"
+	"log"
 	"sync"
 
 	"github.com/google/blueprint/proptools"
@@ -53,6 +55,9 @@
 		cflags = append(cflags, "-DART_HEAP_POISONING=1")
 		asflags = append(asflags, "-DART_HEAP_POISONING=1")
 	}
+	if envTrue(ctx, "ART_USE_CXX_INTERPRETER") {
+		cflags = append(cflags, "-DART_USE_CXX_INTERPRETER=1")
+	}
 
 	if !envFalse(ctx, "ART_USE_READ_BARRIER") && ctx.AConfig().ArtUseReadBarrier() {
 		// Used to change the read barrier type. Valid values are BAKER, BROOKS,
@@ -66,7 +71,7 @@
 			"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
 	}
 
-	if envTrue(ctx, "ART_USE_GENERATIONAL_CC") {
+	if !envFalse(ctx, "ART_USE_GENERATIONAL_CC") {
 		cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
 	}
 
@@ -277,14 +282,45 @@
 
 func init() {
 	android.RegisterModuleType("art_cc_library", artLibrary)
-	android.RegisterModuleType("art_cc_static_library", artStaticLibrary)
+	android.RegisterModuleType("art_cc_library_static", artStaticLibrary)
 	android.RegisterModuleType("art_cc_binary", artBinary)
 	android.RegisterModuleType("art_cc_test", artTest)
 	android.RegisterModuleType("art_cc_test_library", artTestLibrary)
 	android.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
 	android.RegisterModuleType("libart_cc_defaults", libartDefaultsFactory)
+	android.RegisterModuleType("libart_static_cc_defaults", libartStaticDefaultsFactory)
 	android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
 	android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
+
+	// TODO: This makes the module disable itself for host if HOST_PREFER_32_BIT is
+	// set. We need this because the multilib types of binaries listed in the apex
+	// rule must match the declared type. This is normally not difficult but HOST_PREFER_32_BIT
+	// changes this to 'prefer32' on all host binaries. Since HOST_PREFER_32_BIT is
+	// only used for testing we can just disable the module.
+	// See b/120617876 for more information.
+	android.RegisterModuleType("art_apex", artApexBundleFactory)
+}
+
+func artApexBundleFactory() android.Module {
+	module := apex.ApexBundleFactory()
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
+		if envTrue(ctx, "HOST_PREFER_32_BIT") {
+			type props struct {
+				Target struct {
+					Host struct {
+						Enabled *bool
+					}
+				}
+			}
+
+			p := &props{}
+			p.Target.Host.Enabled = proptools.BoolPtr(false)
+			ctx.AppendProperties(p)
+			log.Print("Disabling host build of " + ctx.ModuleName() + " for HOST_PREFER_32_BIT=true")
+		}
+	})
+
+	return module
 }
 
 func artGlobalDefaultsFactory() android.Module {
@@ -312,26 +348,15 @@
 func libartDefaultsFactory() android.Module {
 	c := &codegenProperties{}
 	module := cc.DefaultsFactory(c)
-	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
-		codegen(ctx, c, true)
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) })
 
-		type props struct {
-			Target struct {
-				Android struct {
-					Shared_libs []string
-				}
-			}
-		}
+	return module
+}
 
-		p := &props{}
-		// TODO: express this in .bp instead b/79671158
-		if !envTrue(ctx, "ART_TARGET_LINUX") {
-			p.Target.Android.Shared_libs = []string{
-				"libmetricslogger",
-			}
-		}
-		ctx.AppendProperties(p)
-	})
+func libartStaticDefaultsFactory() android.Module {
+	c := &codegenProperties{}
+	module := cc.DefaultsFactory(c)
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) {	codegen(ctx, c, true) })
 
 	return module
 }
diff --git a/build/codegen.go b/build/codegen.go
index 8526bf1..d0db78e 100644
--- a/build/codegen.go
+++ b/build/codegen.go
@@ -107,8 +107,9 @@
 }
 
 type CodegenCommonArchProperties struct {
-	Srcs   []string
-	Cflags []string
+	Srcs     []string
+	Cflags   []string
+	Cppflags []string
 }
 
 type CodegenLibraryArchProperties struct {
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 95ab123..81a2179 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -85,7 +85,9 @@
   }
 }
 
-static Runtime* StartRuntime(const char* boot_image_location, InstructionSet instruction_set) {
+static Runtime* StartRuntime(const char* boot_image_location,
+                             InstructionSet instruction_set,
+                             const std::vector<const char*>& runtime_args) {
   CHECK(boot_image_location != nullptr);
 
   RuntimeOptions options;
@@ -101,13 +103,19 @@
     std::string boot_image_option;
     boot_image_option += "-Ximage:";
     boot_image_option += boot_image_location;
-    options.push_back(std::make_pair(boot_image_option.c_str(), nullptr));
+    options.push_back(std::make_pair(boot_image_option, nullptr));
   }
 
   // Instruction set.
   options.push_back(
       std::make_pair("imageinstructionset",
                      reinterpret_cast<const void*>(GetInstructionSetString(instruction_set))));
+
+  // Explicit runtime args.
+  for (const char* runtime_arg : runtime_args) {
+    options.push_back(std::make_pair(runtime_arg, nullptr));
+  }
+
   // None of the command line tools need sig chain. If this changes we'll need
   // to upgrade this option to a proper parameter.
   options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
@@ -154,6 +162,14 @@
           PrintUsage();
           return false;
         }
+      } else if (option == "--runtime-arg") {
+        if (i + 1 == argc) {
+          fprintf(stderr, "Missing argument for --runtime-arg\n");
+          PrintUsage();
+          return false;
+        }
+        ++i;
+        runtime_args_.push_back(argv[i]);
       } else if (option.starts_with("--output=")) {
         output_name_ = option.substr(strlen("--output=")).ToString();
         const char* filename = output_name_.c_str();
@@ -209,6 +225,12 @@
         "      Default: %s\n"
         "\n",
         GetInstructionSetString(kRuntimeISA));
+    usage +=
+        "  --runtime-arg <argument> used to specify various arguments for the runtime\n"
+        "      such as initial heap size, maximum heap size, and verbose output.\n"
+        "      Use a separate --runtime-arg switch for each argument.\n"
+        "      Example: --runtime-arg -Xms256m\n"
+        "\n";
     usage +=  // Optional.
         "  --output=<file> may be used to send the output to a file.\n"
         "      Example: --output=/tmp/oatdump.txt\n"
@@ -221,6 +243,8 @@
   const char* boot_image_location_ = nullptr;
   // Specified by --instruction-set.
   InstructionSet instruction_set_ = InstructionSet::kNone;
+  // Runtime arguments specified by --runtime-arg.
+  std::vector<const char*> runtime_args_;
   // Specified by --output.
   std::ostream* os_ = &std::cout;
   std::unique_ptr<std::ofstream> out_;  // If something besides cout is used
@@ -383,7 +407,7 @@
   Runtime* CreateRuntime(CmdlineArgs* args) {
     CHECK(args != nullptr);
 
-    return StartRuntime(args->boot_image_location_, args->instruction_set_);
+    return StartRuntime(args->boot_image_location_, args->instruction_set_, args_->runtime_args_);
   }
 };
 }  // namespace art
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index 82c04e7..952be44 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -206,7 +206,7 @@
       };
       load_value_ = []() -> TArg& {
         assert(false && "Should not be appending values to ignored arguments");
-        return *reinterpret_cast<TArg*>(0);  // Blow up.
+        __builtin_trap();  // Blow up.
       };
 
       save_value_specified_ = true;
@@ -270,7 +270,7 @@
 
       load_value_ = []() -> TArg& {
         assert(false && "No load value function defined");
-        return *reinterpret_cast<TArg*>(0);  // Blow up.
+        __builtin_trap();  // Blow up.
       };
     }
 
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 42c6a5f..101e5c4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -63,6 +63,12 @@
     return expected == actual;
   }
 
+  template <char Separator>
+  bool UsuallyEquals(const std::vector<std::string>& expected,
+                     const ParseStringList<Separator>& actual) {
+    return expected == static_cast<std::vector<std::string>>(actual);
+  }
+
   // Try to use memcmp to compare simple plain-old-data structs.
   //
   // This should *not* generate false positives, but it can generate false negatives.
@@ -131,7 +137,7 @@
     art::InitLogging(nullptr, art::Runtime::Abort);  // argv = null
   }
 
-  virtual void SetUp() {
+  void SetUp() override {
     parser_ = ParsedOptions::MakeParser(false);  // do not ignore unrecognized options
   }
 
@@ -218,8 +224,13 @@
   }
 
   EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote);
-  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
-  EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello/world"}),
+                            "-Xbootclasspath:/hello/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello", "/world"}),
+                            "-Xbootclasspath:/hello:/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-classpath /hello/world", M::ClassPath);
   EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize);
   EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize);
   EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM);
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 48da755..478ecdf 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -24,11 +24,11 @@
 #include "detail/cmdline_debug_detail.h"
 #include "memory_representation.h"
 
+#include "android-base/logging.h"
 #include "android-base/strings.h"
 
 // Includes for the types that are being specialized
 #include <string>
-#include "base/logging.h"
 #include "base/time_utils.h"
 #include "experimental_flags.h"
 #include "gc/collector_type.h"
@@ -427,6 +427,7 @@
   gc::CollectorType collector_type_ = gc::kCollectorTypeDefault;
   bool verify_pre_gc_heap_ = false;
   bool verify_pre_sweeping_heap_ = kIsDebugBuild;
+  bool generational_cc = kEnableGenerationalCCByDefault;
   bool verify_post_gc_heap_ = false;
   bool verify_pre_gc_rosalloc_ = kIsDebugBuild;
   bool verify_pre_sweeping_rosalloc_ = false;
@@ -455,6 +456,10 @@
         xgc.verify_pre_sweeping_heap_ = true;
       } else if (gc_option == "nopresweepingverify") {
         xgc.verify_pre_sweeping_heap_ = false;
+      } else if (gc_option == "generational_cc") {
+        xgc.generational_cc = true;
+      } else if (gc_option == "nogenerational_cc") {
+        xgc.generational_cc = false;
       } else if (gc_option == "postverify") {
         xgc.verify_post_gc_heap_ = true;
       } else if (gc_option == "nopostverify") {
diff --git a/cmdline/token_range.h b/cmdline/token_range.h
index 642bb1d..e28ead9 100644
--- a/cmdline/token_range.h
+++ b/cmdline/token_range.h
@@ -325,7 +325,7 @@
     string_idx += remaining;
     maybe_push_wildcard_token();
 
-    return std::unique_ptr<TokenRange>(new TokenRange(std::move(new_token_list)));
+    return std::make_unique<TokenRange>(std::move(new_token_list));
   }
 
   // Do a quick match token-by-token, and see if they match.
diff --git a/compiler/Android.bp b/compiler/Android.bp
index c365537..0ebaa5f 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -157,6 +157,8 @@
                 "optimizing/code_generator_x86.cc",
                 "optimizing/code_generator_vector_x86.cc",
                 "optimizing/intrinsics_x86.cc",
+                "optimizing/instruction_simplifier_x86_shared.cc",
+                "optimizing/instruction_simplifier_x86.cc",
                 "optimizing/pc_relative_fixups_x86.cc",
                 "optimizing/x86_memory_gen.cc",
                 "utils/x86/assembler_x86.cc",
@@ -168,6 +170,7 @@
             srcs: [
                 "jni/quick/x86_64/calling_convention_x86_64.cc",
                 "optimizing/intrinsics_x86_64.cc",
+                "optimizing/instruction_simplifier_x86_64.cc",
                 "optimizing/code_generator_x86_64.cc",
                 "optimizing/code_generator_vector_x86_64.cc",
                 "utils/x86_64/assembler_x86_64.cc",
@@ -179,8 +182,6 @@
     generated_sources: ["art_compiler_operator_srcs"],
     shared_libs: [
         "libbase",
-        "libcutils",  // for atrace.
-        "liblzma",
     ],
     include_dirs: ["art/disassembler"],
     header_libs: [
@@ -191,6 +192,13 @@
     export_include_dirs: ["."],
 }
 
+cc_defaults {
+    name: "libart-compiler_static_base_defaults",
+    static_libs: [
+        "libbase",
+    ],
+}
+
 gensrcs {
     name: "art_compiler_operator_srcs",
     cmd: "$(location generate_operator_out) art/compiler $(in) > $(out)",
@@ -221,12 +229,12 @@
             // VIXL assembly support for ARM targets.
             static: {
                 whole_static_libs: [
-                    "libvixl-arm",
+                    "libvixl",
                 ],
             },
             shared: {
                 shared_libs: [
-                    "libvixl-arm",
+                    "libvixl",
                 ],
             },
         },
@@ -234,21 +242,22 @@
             // VIXL assembly support for ARM64 targets.
             static: {
                 whole_static_libs: [
-                    "libvixl-arm64",
+                    "libvixl",
                 ],
             },
             shared: {
                 shared_libs: [
-                    "libvixl-arm64",
+                    "libvixl",
                 ],
             },
         },
     },
     shared_libs: [
         "libart",
+        "libartbase",
+        "libartpalette",
         "libprofile",
         "libdexfile",
-        "libartbase",
     ],
 
     target: {
@@ -260,6 +269,18 @@
     },
 }
 
+cc_defaults {
+    name: "libart-compiler_static_defaults",
+    defaults: [
+        "libart-compiler_static_base_defaults",
+        "libart_static_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+        "libprofile_static_defaults",
+    ],
+    static_libs: ["libart-compiler"],
+}
+
 art_cc_library {
     name: "libartd-compiler",
     defaults: [
@@ -271,12 +292,12 @@
             // VIXL assembly support for ARM targets.
             static: {
                 whole_static_libs: [
-                    "libvixld-arm",
+                    "libvixld",
                 ],
             },
             shared: {
                 shared_libs: [
-                    "libvixld-arm",
+                    "libvixld",
                 ],
             },
         },
@@ -284,24 +305,37 @@
             // VIXL assembly support for ARM64 targets.
             static: {
                 whole_static_libs: [
-                    "libvixld-arm64",
+                    "libvixld",
                 ],
             },
             shared: {
                 shared_libs: [
-                    "libvixld-arm64",
+                    "libvixld",
                 ],
             },
         },
     },
     shared_libs: [
+        "libartbased",
         "libartd",
+        "libartpalette",
         "libprofiled",
         "libdexfiled",
-        "libartbased",
     ],
 }
 
+cc_defaults {
+    name: "libartd-compiler_static_defaults",
+    defaults: [
+        "libart-compiler_static_base_defaults",
+        "libartd_static_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+        "libprofiled_static_defaults",
+    ],
+    static_libs: ["libartd-compiler"],
+}
+
 art_cc_library {
     name: "libart-compiler-gtest",
     defaults: ["libart-gtest-defaults"],
@@ -418,8 +452,7 @@
         "libprofiled",
         "libartd-compiler",
         "libartd-simulator-container",
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
 
         "libbacktrace",
         "libnativeloader",
@@ -476,7 +509,6 @@
     },
     shared_libs: [
         "libartd-compiler",
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
     ],
 }
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 581edaa..658bdb3 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -53,13 +53,13 @@
     dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_);
     std::vector<uintptr_t> debug_frame_patches;
     dwarf::WriteFDE(is64bit,
-                    /* section_address */ 0,
-                    /* cie_address */ 0,
-                    /* code_address */ 0,
+                    /* section_address= */ 0,
+                    /* cie_address= */ 0,
+                    /* code_address= */ 0,
                     actual_asm.size(),
                     actual_cfi,
                     kCFIFormat,
-                    /* buffer_address */ 0,
+                    /* buffer_address= */ 0,
                     &debug_frame_data_,
                     &debug_frame_patches);
     ReformatCfi(Objdump(false, "-W"), &lines);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 586891a..07c73c9 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -22,6 +22,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
+#include "base/casts.h"
 #include "base/enums.h"
 #include "base/utils.h"
 #include "class_linker.h"
@@ -152,6 +153,10 @@
 
     CreateCompilerDriver();
   }
+  // Note: We cannot use MemMap because some tests tear down the Runtime and destroy
+  // the gMaps, so when destroying the MemMap, the test would crash.
+  inaccessible_page_ = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  CHECK(inaccessible_page_ != MAP_FAILED) << strerror(errno);
 }
 
 void CommonCompilerTest::ApplyInstructionSet() {
@@ -184,17 +189,15 @@
 void CommonCompilerTest::CreateCompilerDriver() {
   ApplyInstructionSet();
 
-  compiler_options_->boot_image_ = true;
+  compiler_options_->image_type_ = CompilerOptions::ImageType::kBootImage;
   compiler_options_->compile_pic_ = false;  // Non-PIC boot image is a test configuration.
   compiler_options_->SetCompilerFilter(GetCompilerFilter());
   compiler_options_->image_classes_.swap(*GetImageClasses());
+  compiler_options_->profile_compilation_info_ = GetProfileCompilationInfo();
   compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                            verification_results_.get(),
                                             compiler_kind_,
-                                            &compiler_options_->image_classes_,
                                             number_of_threads_,
-                                            /* swap_fd */ -1,
-                                            GetProfileCompilationInfo()));
+                                            /* swap_fd= */ -1));
 }
 
 void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
@@ -222,6 +225,10 @@
   verification_results_.reset();
   compiler_options_.reset();
   image_reservation_.Reset();
+  if (inaccessible_page_ != nullptr) {
+    munmap(inaccessible_page_, kPageSize);
+    inaccessible_page_ = nullptr;
+  }
 
   CommonRuntimeTest::TearDown();
 }
@@ -257,7 +264,7 @@
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader = hs.NewHandle(
         self->DecodeJObject(class_loader)->AsClassLoader());
-    const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+    const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
 
     std::vector<const DexFile*> dex_files;
     dex_files.push_back(dex_file);
@@ -267,8 +274,16 @@
 
     compiler_driver_->InitializeThreadPools();
 
-    compiler_driver_->PreCompile(class_loader, dex_files, &timings);
+    compiler_driver_->PreCompile(class_loader,
+                                 dex_files,
+                                 &timings,
+                                 &compiler_options_->image_classes_,
+                                 verification_results_.get());
 
+    // Verification results in the `callback_` should not be used during compilation.
+    down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+        reinterpret_cast<VerificationResults*>(inaccessible_page_));
+    compiler_options_->verification_results_ = verification_results_.get();
     compiler_driver_->CompileOne(self,
                                  class_loader,
                                  *dex_file,
@@ -279,6 +294,9 @@
                                  code_item,
                                  dex_cache,
                                  h_class_loader);
+    compiler_options_->verification_results_ = nullptr;
+    down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+        verification_results_.get());
 
     compiler_driver_->FreeThreadPools();
 
@@ -328,10 +346,38 @@
                                             (size_t)120 * 1024 * 1024,  // 120MB
                                             PROT_NONE,
                                             false /* no need for 4gb flag with fixed mmap */,
+                                            /*reuse=*/ false,
+                                            /*reservation=*/ nullptr,
                                             &error_msg);
   CHECK(image_reservation_.IsValid()) << error_msg;
 }
 
+void CommonCompilerTest::CompileAll(jobject class_loader,
+                                    const std::vector<const DexFile*>& dex_files,
+                                    TimingLogger* timings) {
+  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
+  SetDexFilesForOatFile(dex_files);
+
+  compiler_driver_->InitializeThreadPools();
+
+  compiler_driver_->PreCompile(class_loader,
+                               dex_files,
+                               timings,
+                               &compiler_options_->image_classes_,
+                               verification_results_.get());
+
+  // Verification results in the `callback_` should not be used during compilation.
+  down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+      reinterpret_cast<VerificationResults*>(inaccessible_page_));
+  compiler_options_->verification_results_ = verification_results_.get();
+  compiler_driver_->CompileAll(class_loader, dex_files, timings);
+  compiler_options_->verification_results_ = nullptr;
+  down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+      verification_results_.get());
+
+  compiler_driver_->FreeThreadPools();
+}
+
 void CommonCompilerTest::UnreserveImageSpace() {
   image_reservation_.Reset();
 }
@@ -343,7 +389,7 @@
 }
 
 void CommonCompilerTest::ClearBootImageOption() {
-  compiler_options_->boot_image_ = false;
+  compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
 }
 
 }  // namespace art
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index e6d1564..a71908e 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -20,6 +20,8 @@
 #include <list>
 #include <vector>
 
+#include <jni.h>
+
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
 #include "base/hash_set.h"
@@ -37,6 +39,7 @@
 class CumulativeLogger;
 class DexFile;
 class ProfileCompilationInfo;
+class TimingLogger;
 class VerificationResults;
 
 template<class T> class Handle;
@@ -88,6 +91,10 @@
                             const char* method_name, const char* signature)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void CompileAll(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+
   void ApplyInstructionSet();
   void OverrideInstructionSetFeatures(InstructionSet instruction_set, const std::string& variant);
 
@@ -116,6 +123,7 @@
 
  private:
   MemMap image_reservation_;
+  void* inaccessible_page_;
 
   // Chunks must not move their storage after being created - use the node-based std::list.
   std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 29f004c..58f7e4f 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -17,21 +17,20 @@
 #include "compiled_method.h"
 
 #include "driver/compiled_method_storage.h"
-#include "driver/compiler_driver.h"
 #include "utils/swap_space.h"
 
 namespace art {
 
-CompiledCode::CompiledCode(CompilerDriver* compiler_driver,
+CompiledCode::CompiledCode(CompiledMethodStorage* storage,
                            InstructionSet instruction_set,
                            const ArrayRef<const uint8_t>& quick_code)
-    : compiler_driver_(compiler_driver),
-      quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)),
+    : storage_(storage),
+      quick_code_(storage->DeduplicateCode(quick_code)),
       packed_fields_(InstructionSetField::Encode(instruction_set)) {
 }
 
 CompiledCode::~CompiledCode() {
-  compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_);
+  GetStorage()->ReleaseCode(quick_code_);
 }
 
 bool CompiledCode::operator==(const CompiledCode& rhs) const {
@@ -74,7 +73,7 @@
     }
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return 0;
+      UNREACHABLE();
   }
 }
 
@@ -95,33 +94,33 @@
     }
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return nullptr;
+      UNREACHABLE();
   }
 }
 
-CompiledMethod::CompiledMethod(CompilerDriver* driver,
+CompiledMethod::CompiledMethod(CompiledMethodStorage* storage,
                                InstructionSet instruction_set,
                                const ArrayRef<const uint8_t>& quick_code,
                                const ArrayRef<const uint8_t>& vmap_table,
                                const ArrayRef<const uint8_t>& cfi_info,
                                const ArrayRef<const linker::LinkerPatch>& patches)
-    : CompiledCode(driver, instruction_set, quick_code),
-      vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
-      cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
-      patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
+    : CompiledCode(storage, instruction_set, quick_code),
+      vmap_table_(storage->DeduplicateVMapTable(vmap_table)),
+      cfi_info_(storage->DeduplicateCFIInfo(cfi_info)),
+      patches_(storage->DeduplicateLinkerPatches(patches)) {
 }
 
 CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
-    CompilerDriver* driver,
+    CompiledMethodStorage* storage,
     InstructionSet instruction_set,
     const ArrayRef<const uint8_t>& quick_code,
     const ArrayRef<const uint8_t>& vmap_table,
     const ArrayRef<const uint8_t>& cfi_info,
     const ArrayRef<const linker::LinkerPatch>& patches) {
-  SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
+  SwapAllocator<CompiledMethod> alloc(storage->GetSwapSpaceAllocator());
   CompiledMethod* ret = alloc.allocate(1);
   alloc.construct(ret,
-                  driver,
+                  storage,
                   instruction_set,
                   quick_code,
                   vmap_table,
@@ -129,14 +128,15 @@
   return ret;
 }
 
-void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) {
-  SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
+void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage,
+                                                        CompiledMethod* m) {
+  SwapAllocator<CompiledMethod> alloc(storage->GetSwapSpaceAllocator());
   alloc.destroy(m);
   alloc.deallocate(m, 1);
 }
 
 CompiledMethod::~CompiledMethod() {
-  CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+  CompiledMethodStorage* storage = GetStorage();
   storage->ReleaseLinkerPatches(patches_);
   storage->ReleaseCFIInfo(cfi_info_);
   storage->ReleaseVMapTable(vmap_table_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 864ce58..e92777f 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -28,7 +28,6 @@
 namespace art {
 
 template <typename T> class ArrayRef;
-class CompilerDriver;
 class CompiledMethodStorage;
 template<typename T> class LengthPrefixedArray;
 
@@ -39,7 +38,7 @@
 class CompiledCode {
  public:
   // For Quick to supply an code blob
-  CompiledCode(CompilerDriver* compiler_driver,
+  CompiledCode(CompiledMethodStorage* storage,
                InstructionSet instruction_set,
                const ArrayRef<const uint8_t>& quick_code);
 
@@ -78,8 +77,8 @@
   template <typename T>
   static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array);
 
-  CompilerDriver* GetCompilerDriver() {
-    return compiler_driver_;
+  CompiledMethodStorage* GetStorage() {
+    return storage_;
   }
 
   template <typename BitFieldType>
@@ -96,7 +95,7 @@
  private:
   using InstructionSetField = BitField<InstructionSet, 0u, kInstructionSetFieldSize>;
 
-  CompilerDriver* const compiler_driver_;
+  CompiledMethodStorage* const storage_;
 
   // Used to store the compiled code.
   const LengthPrefixedArray<uint8_t>* const quick_code_;
@@ -109,7 +108,7 @@
   // Constructs a CompiledMethod.
   // Note: Consider using the static allocation methods below that will allocate the CompiledMethod
   //       in the swap space.
-  CompiledMethod(CompilerDriver* driver,
+  CompiledMethod(CompiledMethodStorage* storage,
                  InstructionSet instruction_set,
                  const ArrayRef<const uint8_t>& quick_code,
                  const ArrayRef<const uint8_t>& vmap_table,
@@ -119,14 +118,14 @@
   virtual ~CompiledMethod();
 
   static CompiledMethod* SwapAllocCompiledMethod(
-      CompilerDriver* driver,
+      CompiledMethodStorage* storage,
       InstructionSet instruction_set,
       const ArrayRef<const uint8_t>& quick_code,
       const ArrayRef<const uint8_t>& vmap_table,
       const ArrayRef<const uint8_t>& cfi_info,
       const ArrayRef<const linker::LinkerPatch>& patches);
 
-  static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m);
+  static void ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage, CompiledMethod* m);
 
   bool IsIntrinsic() const {
     return GetPackedField<IsIntrinsicField>();
@@ -137,7 +136,7 @@
   // This affects debug information generated at link time.
   void MarkAsIntrinsic() {
     DCHECK(!IsIntrinsic());
-    SetPackedField<IsIntrinsicField>(/* value */ true);
+    SetPackedField<IsIntrinsicField>(/* value= */ true);
   }
 
   ArrayRef<const uint8_t> GetVmapTable() const;
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 646040f..54da446 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -21,6 +21,7 @@
 #include "base/macros.h"
 #include "base/utils.h"
 #include "dex/code_item_accessors-inl.h"
+#include "dex/dex_file.h"
 #include "driver/compiler_driver.h"
 #include "optimizing/optimizing_compiler.h"
 
@@ -39,7 +40,7 @@
   }
 }
 
-bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
+bool Compiler::IsPathologicalCase(const dex::CodeItem& code_item,
                                   uint32_t method_idx,
                                   const DexFile& dex_file) {
   /*
diff --git a/compiler/compiler.h b/compiler/compiler.h
index ef3d87f..8a67724 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -19,10 +19,13 @@
 
 #include "base/mutex.h"
 #include "base/os.h"
-#include "dex/dex_file.h"
+#include "dex/invoke_type.h"
 
 namespace art {
 
+namespace dex {
+struct CodeItem;
+}  // namespace dex
 namespace jit {
 class JitCodeCache;
 class JitLogger;
@@ -35,6 +38,7 @@
 class ArtMethod;
 class CompilerDriver;
 class CompiledMethod;
+class DexFile;
 template<class T> class Handle;
 class OatWriter;
 class Thread;
@@ -54,7 +58,7 @@
 
   virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0;
 
-  virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+  virtual CompiledMethod* Compile(const dex::CodeItem* code_item,
                                   uint32_t access_flags,
                                   InvokeType invoke_type,
                                   uint16_t class_def_idx,
@@ -71,6 +75,7 @@
   virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
                           ArtMethod* method ATTRIBUTE_UNUSED,
+                          bool baseline ATTRIBUTE_UNUSED,
                           bool osr ATTRIBUTE_UNUSED,
                           jit::JitLogger* jit_logger ATTRIBUTE_UNUSED)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -101,7 +106,7 @@
 
   // Returns whether the method to compile is such a pathological case that
   // it's not worth compiling.
-  static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
+  static bool IsPathologicalCase(const dex::CodeItem& code_item,
                                  uint32_t method_idx,
                                  const DexFile& dex_file);
 
diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc
index 933034f..6512314 100644
--- a/compiler/debug/dwarf/dwarf_test.cc
+++ b/compiler/debug/dwarf/dwarf_test.cc
@@ -334,7 +334,7 @@
 
   std::vector<uintptr_t> debug_info_patches;
   std::vector<uintptr_t> expected_patches = { 16, 20, 29, 33, 42, 46 };
-  dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
+  dwarf::WriteDebugInfoCU(/* debug_abbrev_offset= */ 0, info,
                           0, &debug_info_data_, &debug_info_patches);
 
   EXPECT_EQ(expected_patches, debug_info_patches);
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 28f1084..4a27178 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -107,7 +107,9 @@
   } else {
     DCHECK(format == DW_DEBUG_FRAME_FORMAT);
     // Relocate code_address if it has absolute value.
-    patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    if (patch_locations != nullptr) {
+      patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    }
   }
   if (is64bit) {
     writer.PushUint64(code_address);
@@ -122,6 +124,30 @@
   writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
 }
 
+// Read single FDE entry from 'data' (which is advanced).
+template<typename Addr>
+bool ReadFDE(const uint8_t** data, Addr* addr, Addr* size, ArrayRef<const uint8_t>* opcodes) {
+  struct Header {
+    uint32_t length;
+    int32_t cie_pointer;
+    Addr addr;
+    Addr size;
+    uint8_t augmentaion;
+    uint8_t opcodes[];
+  } PACKED(1);
+  const Header* header = reinterpret_cast<const Header*>(*data);
+  const size_t length = 4 + header->length;
+  *data += length;
+  if (header->cie_pointer == -1) {
+    return false;  // Not an FDE entry.
+  }
+  DCHECK_EQ(header->cie_pointer, 0);  // Expects single CIE. Assumes DW_DEBUG_FRAME_FORMAT.
+  *addr = header->addr;
+  *size = header->size;
+  *opcodes = ArrayRef<const uint8_t>(header->opcodes, length - offsetof(Header, opcodes));
+  return true;
+}
+
 // Write compilation unit (CU) to .debug_info section.
 template<typename Vector>
 void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index bda7108..a63f241 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -41,26 +41,14 @@
 namespace art {
 namespace debug {
 
-typedef std::vector<DexFile::LocalInfo> LocalInfos;
-
-static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
-  static_cast<LocalInfos*>(ctx)->push_back(entry);
-}
-
 static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
   std::vector<const char*> names;
+  DCHECK(mi->dex_file != nullptr);
   CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
   if (accessor.HasCodeItem()) {
-    DCHECK(mi->dex_file != nullptr);
-    const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
-    if (stream != nullptr) {
-      DecodeUnsignedLeb128(&stream);  // line.
-      uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
-      for (uint32_t i = 0; i < parameters_size; ++i) {
-        uint32_t id = DecodeUnsignedLeb128P1(&stream);
-        names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id)));
-      }
-    }
+    accessor.VisitParameterNames([&](const dex::StringIndex& id) {
+      names.push_back(mi->dex_file->StringDataByIdx(id));
+    });
   }
   return names;
 }
@@ -164,9 +152,9 @@
       DCHECK(mi->dex_file != nullptr);
       const DexFile* dex = mi->dex_file;
       CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
-      const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
-      const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
-      const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
+      const dex::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
+      const dex::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
+      const dex::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
       const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
       const bool is_static = (mi->access_flags & kAccStatic) != 0;
 
@@ -257,11 +245,12 @@
       }
 
       // Write local variables.
-      LocalInfos local_infos;
+      std::vector<DexFile::LocalInfo> local_infos;
       if (accessor.DecodeDebugLocalInfo(is_static,
                                         mi->dex_method_index,
-                                        LocalInfoCallback,
-                                        &local_infos)) {
+                                        [&](const DexFile::LocalInfo& entry) {
+                                          local_infos.push_back(entry);
+                                        })) {
         for (const DexFile::LocalInfo& var : local_infos) {
           if (var.reg_ < accessor.RegistersSize() - accessor.InsSize()) {
             info_.StartTag(DW_TAG_variable);
@@ -383,10 +372,10 @@
         }
 
         // Base class.
-        mirror::Class* base_class = type->GetSuperClass();
+        ObjPtr<mirror::Class> base_class = type->GetSuperClass();
         if (base_class != nullptr) {
           info_.StartTag(DW_TAG_inheritance);
-          base_class_references.emplace(info_.size(), base_class);
+          base_class_references.emplace(info_.size(), base_class.Ptr());
           info_.WriteRef4(DW_AT_type, 0);
           info_.WriteUdata(DW_AT_data_member_location, 0);
           info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 3d78943..0a13a92 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -34,11 +34,6 @@
 
 typedef std::vector<DexFile::PositionInfo> PositionInfos;
 
-static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
-  static_cast<PositionInfos*>(ctx)->push_back(entry);
-  return false;
-}
-
 template<typename ElfTypes>
 class ElfDebugLineWriter {
   using Elf_Addr = typename ElfTypes::Addr;
@@ -154,11 +149,14 @@
       Elf_Addr method_address = base_address + mi->code_address;
 
       PositionInfos dex2line_map;
-      DCHECK(mi->dex_file != nullptr);
       const DexFile* dex = mi->dex_file;
+      DCHECK(dex != nullptr);
       CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
-      const uint32_t debug_info_offset = accessor.DebugInfoOffset();
-      if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) {
+      if (!accessor.DecodeDebugPositionInfo(
+          [&](const DexFile::PositionInfo& entry) {
+            dex2line_map.push_back(entry);
+            return false;
+          })) {
         continue;
       }
 
diff --git a/compiler/debug/elf_debug_reader.h b/compiler/debug/elf_debug_reader.h
new file mode 100644
index 0000000..91b1b3e
--- /dev/null
+++ b/compiler/debug/elf_debug_reader.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+
+#include "base/array_ref.h"
+#include "debug/dwarf/headers.h"
+#include "elf.h"
+#include "xz_utils.h"
+
+namespace art {
+namespace debug {
+
+// Trivial ELF file reader.
+//
+// It is the bare minimum needed to read mini-debug-info symbols for unwinding.
+// We use it to merge JIT mini-debug-infos together or to prune them after GC.
+// The consumed ELF file comes from ART JIT.
+template <typename ElfTypes, typename VisitSym, typename VisitFde>
+static void ReadElfSymbols(const uint8_t* elf, VisitSym visit_sym, VisitFde visit_fde) {
+  // Note that the input buffer might be misaligned.
+  typedef typename ElfTypes::Ehdr ALIGNED(1) Elf_Ehdr;
+  typedef typename ElfTypes::Shdr ALIGNED(1) Elf_Shdr;
+  typedef typename ElfTypes::Sym ALIGNED(1) Elf_Sym;
+  typedef typename ElfTypes::Addr ALIGNED(1) Elf_Addr;
+
+  // Read and check the elf header.
+  const Elf_Ehdr* header = reinterpret_cast<const Elf_Ehdr*>(elf);
+  CHECK(header->checkMagic());
+
+  // Find sections that we are interested in.
+  const Elf_Shdr* sections = reinterpret_cast<const Elf_Shdr*>(elf + header->e_shoff);
+  const Elf_Shdr* strtab = nullptr;
+  const Elf_Shdr* symtab = nullptr;
+  const Elf_Shdr* debug_frame = nullptr;
+  const Elf_Shdr* gnu_debugdata = nullptr;
+  for (size_t i = 1 /* skip null section */; i < header->e_shnum; i++) {
+    const Elf_Shdr* section = sections + i;
+    const char* name = reinterpret_cast<const char*>(
+        elf + sections[header->e_shstrndx].sh_offset + section->sh_name);
+    if (strcmp(name, ".strtab") == 0) {
+      strtab = section;
+    } else if (strcmp(name, ".symtab") == 0) {
+      symtab = section;
+    } else if (strcmp(name, ".debug_frame") == 0) {
+      debug_frame = section;
+    } else if (strcmp(name, ".gnu_debugdata") == 0) {
+      gnu_debugdata = section;
+    }
+  }
+
+  // Visit symbols.
+  if (symtab != nullptr && strtab != nullptr) {
+    const Elf_Sym* symbols = reinterpret_cast<const Elf_Sym*>(elf + symtab->sh_offset);
+    DCHECK_EQ(symtab->sh_entsize, sizeof(Elf_Sym));
+    size_t count = symtab->sh_size / sizeof(Elf_Sym);
+    for (size_t i = 1 /* skip null symbol */; i < count; i++) {
+      Elf_Sym symbol = symbols[i];
+      if (symbol.getBinding() != STB_LOCAL) {  // Ignore local symbols (e.g. "$t").
+        const uint8_t* name = elf + strtab->sh_offset + symbol.st_name;
+        visit_sym(symbol, reinterpret_cast<const char*>(name));
+      }
+    }
+  }
+
+  // Visit CFI (unwind) data.
+  if (debug_frame != nullptr) {
+    const uint8_t* data = elf + debug_frame->sh_offset;
+    const uint8_t* end = data + debug_frame->sh_size;
+    while (data < end) {
+      Elf_Addr addr, size;
+      ArrayRef<const uint8_t> opcodes;
+      if (dwarf::ReadFDE<Elf_Addr>(&data, &addr, &size, &opcodes)) {
+        visit_fde(addr, size, opcodes);
+      }
+    }
+  }
+
+  // Process embedded compressed ELF file.
+  if (gnu_debugdata != nullptr) {
+    ArrayRef<const uint8_t> compressed(elf + gnu_debugdata->sh_offset, gnu_debugdata->sh_size);
+    std::vector<uint8_t> decompressed;
+    XzDecompress(compressed, &decompressed);
+    ReadElfSymbols<ElfTypes>(decompressed.data(), visit_sym, visit_fde);
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 71422d4..e5c09aa 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -16,19 +16,23 @@
 
 #include "elf_debug_writer.h"
 
-#include <vector>
+#include <type_traits>
 #include <unordered_map>
+#include <vector>
 
 #include "base/array_ref.h"
+#include "base/stl_util.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "debug/elf_compilation_unit.h"
 #include "debug/elf_debug_frame_writer.h"
 #include "debug/elf_debug_info_writer.h"
 #include "debug/elf_debug_line_writer.h"
 #include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_debug_reader.h"
 #include "debug/elf_symtab_writer.h"
 #include "debug/method_debug_info.h"
 #include "debug/xz_utils.h"
+#include "elf.h"
 #include "linker/elf_builder.h"
 #include "linker/vector_output_stream.h"
 #include "oat.h"
@@ -36,19 +40,21 @@
 namespace art {
 namespace debug {
 
+using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
+
 template <typename ElfTypes>
 void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
                     const DebugInfo& debug_info,
                     dwarf::CFIFormat cfi_format,
                     bool write_oat_patches) {
   // Write .strtab and .symtab.
-  WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
+  WriteDebugSymbols(builder, /* mini-debug-info= */ false, debug_info);
 
   // Write .debug_frame.
   WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
 
   // Group the methods into compilation units based on class.
-  std::unordered_map<const DexFile::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
+  std::unordered_map<const dex::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
   for (const MethodDebugInfo& mi : debug_info.compiled_methods) {
     if (mi.dex_file != nullptr) {
       auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
@@ -119,22 +125,28 @@
   linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
   std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
-  builder->Start(false /* write_program_headers */);
+  builder->Start(/* write_program_headers= */ false);
   // Mirror ELF sections as NOBITS since the added symbols will reference them.
-  builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
+  if (text_section_size != 0) {
+    builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
+  }
   if (dex_section_size != 0) {
     builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
   }
-  WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
-  WriteCFISection(builder.get(),
-                  debug_info.compiled_methods,
-                  dwarf::DW_DEBUG_FRAME_FORMAT,
-                  false /* write_oat_paches */);
+  if (!debug_info.Empty()) {
+    WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
+  }
+  if (!debug_info.compiled_methods.empty()) {
+    WriteCFISection(builder.get(),
+                    debug_info.compiled_methods,
+                    dwarf::DW_DEBUG_FRAME_FORMAT,
+                    /* write_oat_patches= */ false);
+  }
   builder->End();
   CHECK(builder->Good());
   std::vector<uint8_t> compressed_buffer;
   compressed_buffer.reserve(buffer.size() / 4);
-  XzCompress(ArrayRef<uint8_t>(buffer), &compressed_buffer);
+  XzCompress(ArrayRef<const uint8_t>(buffer), &compressed_buffer);
   return compressed_buffer;
 }
 
@@ -165,107 +177,207 @@
   }
 }
 
-template <typename ElfTypes>
-static std::vector<uint8_t> MakeElfFileForJITInternal(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  CHECK_GT(method_infos.size(), 0u);
-  uint64_t min_address = std::numeric_limits<uint64_t>::max();
-  uint64_t max_address = 0;
-  for (const MethodDebugInfo& mi : method_infos) {
-    CHECK_EQ(mi.is_code_address_text_relative, false);
-    min_address = std::min(min_address, mi.code_address);
-    max_address = std::max(max_address, mi.code_address + mi.code_size);
-  }
-  DebugInfo debug_info{};
-  debug_info.compiled_methods = method_infos;
-  std::vector<uint8_t> buffer;
-  buffer.reserve(KB);
-  linker::VectorOutputStream out("Debug ELF file", &buffer);
-  std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
-      new linker::ElfBuilder<ElfTypes>(isa, features, &out));
-  // No program headers since the ELF file is not linked and has no allocated sections.
-  builder->Start(false /* write_program_headers */);
-  if (mini_debug_info) {
-    if (method_infos.size() > 1) {
-      std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
-                                                   features,
-                                                   min_address,
-                                                   max_address - min_address,
-                                                   /* dex_section_address */ 0,
-                                                   /* dex_section_size */ 0,
-                                                   debug_info);
-      builder->WriteSection(".gnu_debugdata", &mdi);
-    } else {
-      // The compression is great help for multiple methods but it is not worth it for a
-      // single method due to the overheads so skip the compression here for performance.
-      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
-      WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
-      WriteCFISection(builder.get(),
-                      debug_info.compiled_methods,
-                      dwarf::DW_DEBUG_FRAME_FORMAT,
-                      false /* write_oat_paches */);
-    }
-  } else {
-    builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
-    WriteDebugInfo(builder.get(),
-                   debug_info,
-                   dwarf::DW_DEBUG_FRAME_FORMAT,
-                   false /* write_oat_patches */);
-  }
-  builder->End();
-  CHECK(builder->Good());
-  return buffer;
-}
-
 std::vector<uint8_t> MakeElfFileForJIT(
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  if (Is64BitInstructionSet(isa)) {
-    return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
-  } else {
-    return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
-  }
-}
-
-template <typename ElfTypes>
-static std::vector<uint8_t> WriteDebugElfFileForClassesInternal(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    const ArrayRef<mirror::Class*>& types)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+    const MethodDebugInfo& method_info) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  CHECK_EQ(method_info.is_code_address_text_relative, false);
+  DebugInfo debug_info{};
+  debug_info.compiled_methods = ArrayRef<const MethodDebugInfo>(&method_info, 1);
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   linker::VectorOutputStream out("Debug ELF file", &buffer);
   std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
   // No program headers since the ELF file is not linked and has no allocated sections.
-  builder->Start(false /* write_program_headers */);
+  builder->Start(/* write_program_headers= */ false);
+  builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
+  if (mini_debug_info) {
+    // The compression is great help for multiple methods but it is not worth it for a
+    // single method due to the overheads so skip the compression here for performance.
+    WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
+    WriteCFISection(builder.get(),
+                    debug_info.compiled_methods,
+                    dwarf::DW_DEBUG_FRAME_FORMAT,
+                    /* write_oat_patches= */ false);
+  } else {
+    WriteDebugInfo(builder.get(),
+                   debug_info,
+                   dwarf::DW_DEBUG_FRAME_FORMAT,
+                   /* write_oat_patches= */ false);
+  }
+  builder->End();
+  CHECK(builder->Good());
+  // Verify the ELF file by reading it back using the trivial reader.
+  if (kIsDebugBuild) {
+    using Elf_Sym = typename ElfTypes::Sym;
+    using Elf_Addr = typename ElfTypes::Addr;
+    size_t num_syms = 0;
+    size_t num_cfis = 0;
+    ReadElfSymbols<ElfTypes>(
+        buffer.data(),
+        [&](Elf_Sym sym, const char*) {
+          DCHECK_EQ(sym.st_value, method_info.code_address + CompiledMethod::CodeDelta(isa));
+          DCHECK_EQ(sym.st_size, method_info.code_size);
+          num_syms++;
+        },
+        [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+          DCHECK_EQ(addr, method_info.code_address);
+          DCHECK_EQ(size, method_info.code_size);
+          DCHECK_GE(opcodes.size(), method_info.cfi.size());
+          DCHECK_EQ(memcmp(opcodes.data(), method_info.cfi.data(), method_info.cfi.size()), 0);
+          num_cfis++;
+        });
+    DCHECK_EQ(num_syms, 1u);
+    // CFI might be missing. TODO: Ensure we have CFI for all methods.
+    DCHECK_LE(num_cfis, 1u);
+  }
+  return buffer;
+}
+
+// Combine several mini-debug-info ELF files into one, while filtering some symbols.
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols) {
+  using ElfTypes = ElfRuntimeTypes;
+  using Elf_Addr = typename ElfTypes::Addr;
+  using Elf_Sym = typename ElfTypes::Sym;
+  CHECK_EQ(sizeof(Elf_Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  const bool is64bit = Is64BitInstructionSet(isa);
+  auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) {
+    const void* code_ptr = reinterpret_cast<const void*>(addr);
+    return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr);
+  };
+  uint64_t min_address = std::numeric_limits<uint64_t>::max();
+  uint64_t max_address = 0;
+
+  // Produce the inner ELF file.
+  // It will contain the symbols (.symtab) and unwind information (.debug_frame).
+  std::vector<uint8_t> inner_elf_file;
+  {
+    inner_elf_file.reserve(1 * KB);  // Approximate size of ELF file with a single symbol.
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &inner_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    auto* text = builder->GetText();
+    auto* strtab = builder->GetStrTab();
+    auto* symtab = builder->GetSymTab();
+    auto* debug_frame = builder->GetDebugFrame();
+    std::deque<Elf_Sym> symbols;
+    std::vector<uint8_t> debug_frame_buffer;
+    WriteCIE(isa, dwarf::DW_DEBUG_FRAME_FORMAT, &debug_frame_buffer);
+
+    // Write symbols names. All other data is buffered.
+    strtab->Start();
+    strtab->Write("");  // strtab should start with empty string.
+    for (const uint8_t* added_elf_file : added_elf_files) {
+      ReadElfSymbols<ElfTypes>(
+          added_elf_file,
+          [&](Elf_Sym sym, const char* name) {
+              if (is_removed_symbol(sym.st_value)) {
+                return;
+              }
+              sym.st_name = strtab->Write(name);
+              symbols.push_back(sym);
+              min_address = std::min<uint64_t>(min_address, sym.st_value);
+              max_address = std::max<uint64_t>(max_address, sym.st_value + sym.st_size);
+          },
+          [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+              if (is_removed_symbol(addr)) {
+                return;
+              }
+              WriteFDE(is64bit,
+                       /*section_address=*/ 0,
+                       /*cie_address=*/ 0,
+                       addr,
+                       size,
+                       opcodes,
+                       dwarf::DW_DEBUG_FRAME_FORMAT,
+                       debug_frame_buffer.size(),
+                       &debug_frame_buffer,
+                       /*patch_locations=*/ nullptr);
+          });
+    }
+    strtab->End();
+
+    // Create .text covering the code range. Needed for gdb to find the symbols.
+    if (max_address > min_address) {
+      text->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+
+    // Add the symbols.
+    *num_symbols = symbols.size();
+    for (; !symbols.empty(); symbols.pop_front()) {
+      symtab->Add(symbols.front(), text);
+    }
+    symtab->WriteCachedSection();
+
+    // Add the CFI/unwind section.
+    debug_frame->Start();
+    debug_frame->WriteFully(debug_frame_buffer.data(), debug_frame_buffer.size());
+    debug_frame->End();
+
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  // Produce the outer ELF file.
+  // It contains only the inner ELF file compressed as .gnu_debugdata section.
+  // This extra wrapping is not necessary but the compression saves space.
+  std::vector<uint8_t> outer_elf_file;
+  {
+    std::vector<uint8_t> gnu_debugdata;
+    gnu_debugdata.reserve(inner_elf_file.size() / 4);
+    XzCompress(ArrayRef<const uint8_t>(inner_elf_file), &gnu_debugdata);
+
+    outer_elf_file.reserve(KB + gnu_debugdata.size());
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &outer_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    if (max_address > min_address) {
+      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+    builder->WriteSection(".gnu_debugdata", &gnu_debugdata);
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  return outer_elf_file;
+}
+
+std::vector<uint8_t> WriteDebugElfFileForClasses(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    const ArrayRef<mirror::Class*>& types)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  linker::VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+      new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+  // No program headers since the ELF file is not linked and has no allocated sections.
+  builder->Start(/* write_program_headers= */ false);
   ElfDebugInfoWriter<ElfTypes> info_writer(builder.get());
   info_writer.Start();
   ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
   cu_writer.Write(types);
-  info_writer.End(false /* write_oat_patches */);
+  info_writer.End(/* write_oat_patches= */ false);
 
   builder->End();
   CHECK(builder->Good());
   return buffer;
 }
 
-std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
-                                                 const InstructionSetFeatures* features,
-                                                 const ArrayRef<mirror::Class*>& types) {
-  if (Is64BitInstructionSet(isa)) {
-    return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types);
-  } else {
-    return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, features, types);
-  }
-}
-
 // Explicit instantiations
 template void WriteDebugInfo<ElfTypes32>(
     linker::ElfBuilder<ElfTypes32>* builder,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index e442e00..85ab356 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -54,7 +54,14 @@
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos);
+    const MethodDebugInfo& method_info);
+
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols);
 
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index 729c403..152db6e 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -32,7 +32,7 @@
   size_t class_def_index;
   uint32_t dex_method_index;
   uint32_t access_flags;
-  const DexFile::CodeItem* code_item;
+  const dex::CodeItem* code_item;
   InstructionSet isa;
   bool deduped;
   bool is_native_debuggable;
diff --git a/compiler/debug/xz_utils.cc b/compiler/debug/xz_utils.cc
index a9e30a6..a8f60ac 100644
--- a/compiler/debug/xz_utils.cc
+++ b/compiler/debug/xz_utils.cc
@@ -17,13 +17,16 @@
 #include "xz_utils.h"
 
 #include <vector>
+#include <mutex>
 
 #include "base/array_ref.h"
-#include "dwarf/writer.h"
+#include "base/bit_utils.h"
 #include "base/leb128.h"
+#include "dwarf/writer.h"
 
 // liblzma.
 #include "7zCrc.h"
+#include "Xz.h"
 #include "XzCrc64.h"
 #include "XzEnc.h"
 
@@ -32,10 +35,17 @@
 
 constexpr size_t kChunkSize = kPageSize;
 
-static void XzCompressChunk(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
+static void XzInitCrc() {
+  static std::once_flag crc_initialized;
+  std::call_once(crc_initialized, []() {
+    CrcGenerateTable();
+    Crc64GenerateTable();
+  });
+}
+
+static void XzCompressChunk(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
   // Configure the compression library.
-  CrcGenerateTable();
-  Crc64GenerateTable();
+  XzInitCrc();
   CLzma2EncProps lzma2Props;
   Lzma2EncProps_Init(&lzma2Props);
   lzma2Props.lzmaProps.level = 1;  // Fast compression.
@@ -62,7 +72,7 @@
       return SZ_OK;
     }
     size_t src_pos_;
-    ArrayRef<uint8_t> src_;
+    ArrayRef<const uint8_t> src_;
     std::vector<uint8_t>* dst_;
   };
   XzCallbacks callbacks;
@@ -85,7 +95,7 @@
 // In short, the file format is: [header] [compressed_block]* [index] [footer]
 // Where [index] is: [num_records] ([compressed_size] [uncompressed_size])* [crc32]
 //
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
   uint8_t header[] = { 0xFD, '7', 'z', 'X', 'Z', 0, 0, 1, 0x69, 0x22, 0xDE, 0x36 };
   uint8_t footer[] = { 0, 1, 'Y', 'Z' };
   dst->insert(dst->end(), header, header + sizeof(header));
@@ -138,6 +148,47 @@
     writer.UpdateUint32(0, CrcCalc(tmp.data() + 4, 6));
     dst->insert(dst->end(), tmp.begin(), tmp.end());
   }
+
+  // Decompress the data back and check that we get the original.
+  if (kIsDebugBuild) {
+    std::vector<uint8_t> decompressed;
+    XzDecompress(ArrayRef<const uint8_t>(*dst), &decompressed);
+    DCHECK_EQ(decompressed.size(), src.size());
+    DCHECK_EQ(memcmp(decompressed.data(), src.data(), src.size()), 0);
+  }
+}
+
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
+  XzInitCrc();
+  std::unique_ptr<CXzUnpacker> state(new CXzUnpacker());
+  ISzAlloc alloc;
+  alloc.Alloc = [](ISzAllocPtr, size_t size) { return malloc(size); };
+  alloc.Free = [](ISzAllocPtr, void* ptr) { return free(ptr); };
+  XzUnpacker_Construct(state.get(), &alloc);
+
+  size_t src_offset = 0;
+  size_t dst_offset = 0;
+  ECoderStatus status;
+  do {
+    dst->resize(RoundUp(dst_offset + kPageSize / 4, kPageSize));
+    size_t src_remaining = src.size() - src_offset;
+    size_t dst_remaining = dst->size() - dst_offset;
+    int return_val = XzUnpacker_Code(state.get(),
+                                     dst->data() + dst_offset,
+                                     &dst_remaining,
+                                     src.data() + src_offset,
+                                     &src_remaining,
+                                     true,
+                                     CODER_FINISH_ANY,
+                                     &status);
+    CHECK_EQ(return_val, SZ_OK);
+    src_offset += src_remaining;
+    dst_offset += dst_remaining;
+  } while (status == CODER_STATUS_NOT_FINISHED);
+  CHECK_EQ(src_offset, src.size());
+  CHECK(XzUnpacker_IsStreamWasFinished(state.get()));
+  XzUnpacker_Free(state.get());
+  dst->resize(dst_offset);
 }
 
 }  // namespace debug
diff --git a/compiler/debug/xz_utils.h b/compiler/debug/xz_utils.h
index c4076c6..731b03c 100644
--- a/compiler/debug/xz_utils.h
+++ b/compiler/debug/xz_utils.h
@@ -24,7 +24,8 @@
 namespace art {
 namespace debug {
 
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst);
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
 
 }  // namespace debug
 }  // namespace art
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ad9a30f..23ce37e 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -376,9 +376,7 @@
   DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
   if (unit_.IsConstructor()) {
     // Are we compiling a non clinit constructor which needs a barrier ?
-    if (!unit_.IsStatic() &&
-        driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
-                                           unit_.GetClassDefIndex())) {
+    if (!unit_.IsStatic() && unit_.RequiresConstructorBarrier()) {
       return;
     }
   }
@@ -475,7 +473,7 @@
           method_idx,
           unit_.GetDexCache(),
           unit_.GetClassLoader(),
-          /* referrer */ nullptr,
+          /* referrer= */ nullptr,
           kVirtual);
 
   if (UNLIKELY(resolved_method == nullptr)) {
@@ -507,7 +505,7 @@
 }
 
 CompiledMethod* DexToDexCompiler::CompileMethod(
-    const DexFile::CodeItem* code_item,
+    const dex::CodeItem* code_item,
     uint32_t access_flags,
     InvokeType invoke_type ATTRIBUTE_UNUSED,
     uint16_t class_def_idx,
@@ -530,7 +528,7 @@
       class_def_idx,
       method_idx,
       access_flags,
-      driver_->GetVerifiedMethod(&dex_file, method_idx),
+      driver_->GetCompilerOptions().GetVerifiedMethod(&dex_file, method_idx),
       hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
 
   std::vector<uint8_t> quicken_data;
@@ -616,7 +614,7 @@
     instruction_set = InstructionSet::kArm;
   }
   CompiledMethod* ret = CompiledMethod::SwapAllocCompiledMethod(
-      driver_,
+      driver_->GetCompiledMethodStorage(),
       instruction_set,
       ArrayRef<const uint8_t>(),                   // no code
       ArrayRef<const uint8_t>(quicken_data),       // vmap_table
@@ -629,11 +627,11 @@
 void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files) {
   // Record what code items are already seen to detect when multiple methods have the same code
   // item.
-  std::unordered_set<const DexFile::CodeItem*> seen_code_items;
+  std::unordered_set<const dex::CodeItem*> seen_code_items;
   for (const DexFile* dex_file : dex_files) {
     for (ClassAccessor accessor : dex_file->GetClasses()) {
       for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-        const DexFile::CodeItem* code_item = method.GetCodeItem();
+        const dex::CodeItem* code_item = method.GetCodeItem();
         // Detect the shared code items.
         if (!seen_code_items.insert(code_item).second) {
           shared_code_items_.insert(code_item);
@@ -648,7 +646,7 @@
   MutexLock mu(Thread::Current(), lock_);
   size_t unquicken_count = 0;
   for (const auto& pair : shared_code_item_quicken_info_) {
-    const DexFile::CodeItem* code_item = pair.first;
+    const dex::CodeItem* code_item = pair.first;
     const QuickenState& state = pair.second;
     CHECK_GE(state.methods_.size(), 1u);
     if (state.conflict_) {
@@ -667,7 +665,8 @@
           // There is up to one compiled method for each method ref. Releasing it leaves the
           // deduped data intact, this means its safe to do even when other threads might be
           // compiling.
-          CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_, method);
+          CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_->GetCompiledMethodStorage(),
+                                                             method);
         }
       }
     }
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 7536c31..78309ae 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -22,7 +22,7 @@
 #include <unordered_set>
 
 #include "base/bit_vector.h"
-#include "dex/dex_file.h"
+#include "base/mutex.h"
 #include "dex/invoke_type.h"
 #include "dex/method_reference.h"
 #include "handle.h"
@@ -33,6 +33,11 @@
 class CompiledMethod;
 class CompilerDriver;
 class DexCompilationUnit;
+class DexFile;
+
+namespace dex {
+struct CodeItem;
+}  // namespace dex
 
 namespace mirror {
 class ClassLoader;
@@ -49,7 +54,7 @@
 
   explicit DexToDexCompiler(CompilerDriver* driver);
 
-  CompiledMethod* CompileMethod(const DexFile::CodeItem* code_item,
+  CompiledMethod* CompileMethod(const dex::CodeItem* code_item,
                                 uint32_t access_flags,
                                 InvokeType invoke_type,
                                 uint16_t class_def_idx,
@@ -104,9 +109,9 @@
   std::unordered_map<const DexFile*, BitVector> should_quicken_;
   // Guarded by lock_ during writing, accessed without a lock during quickening.
   // This is safe because no thread is adding to the shared code items during the quickening phase.
-  std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
+  std::unordered_set<const dex::CodeItem*> shared_code_items_;
   // Blacklisted code items are unquickened in UnquickenConflictingMethods.
-  std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
+  std::unordered_map<const dex::CodeItem*, QuickenState> shared_code_item_quicken_info_
       GUARDED_BY(lock_);
   // Number of added code items.
   size_t num_code_items_ GUARDED_BY(lock_) = 0u;
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index 4f83d60..1f04546 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -39,16 +39,15 @@
 class DexToDexDecompilerTest : public CommonCompilerTest {
  public:
   void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
-    TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
-    TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
-    compiler_options_->boot_image_ = false;
+    TimingLogger timings("DexToDexDecompilerTest::CompileAll", false, false);
+    compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
     compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken);
     // Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
     // the results for all the dex files, not just the results for the current dex file.
     down_cast<QuickCompilerCallbacks*>(Runtime::Current()->GetCompilerCallbacks())->SetVerifierDeps(
         new verifier::VerifierDeps(GetDexFiles(class_loader)));
-    SetDexFilesForOatFile(GetDexFiles(class_loader));
-    compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
+    std::vector<const DexFile*> dex_files = GetDexFiles(class_loader);
+    CommonCompilerTest::CompileAll(class_loader, dex_files, &timings);
   }
 
   void RunTest(const char* dex_name) {
@@ -96,7 +95,7 @@
         optimizer::ArtDecompileDEX(*updated_dex_file,
                                    *accessor.GetCodeItem(method),
                                    table,
-                                   /* decompile_return_instruction */ true);
+                                   /* decompile_return_instruction= */ true);
       }
     }
 
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index fe8b766..b0f025d 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -41,7 +41,7 @@
 class Matcher {
  public:
   // Match function type.
-  typedef bool MatchFn(Matcher* matcher);
+  using MatchFn = bool(Matcher*);
 
   template <size_t size>
   static bool Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]);
@@ -216,7 +216,7 @@
   DCHECK(IsInstructionIPut(new_iput->Opcode()));
   uint32_t field_index = new_iput->VRegC_22c();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
+  ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static= */ false);
   if (UNLIKELY(field == nullptr)) {
     return false;
   }
@@ -228,7 +228,7 @@
     }
     ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
                                                     method,
-                                                    /* is_static */ false);
+                                                    /* is_static= */ false);
     DCHECK(f != nullptr);
     if (f == field) {
       auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -511,7 +511,7 @@
 }
 
 bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
-  const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
+  const dex::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
   const char* method_name = ref.dex_file->GetMethodName(method_id);
   // javac names synthetic accessors "access$nnn",
   // jack names them "-getN", "-putN", "-wrapN".
@@ -713,7 +713,7 @@
   }
   ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
+  ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static= */ false);
   if (field == nullptr || field->IsStatic()) {
     return false;
   }
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index b7117bd..e92b67a 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,11 +38,6 @@
 
   void ClassRejected(ClassReference ref) override;
 
-  // We are running in an environment where we can call patchoat safely so we should.
-  bool IsRelocationPossible() override {
-    return true;
-  }
-
   verifier::VerifierDeps* GetVerifierDeps() const override {
     return verifier_deps_.get();
   }
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1e0b94d..6bd5fe8 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -79,7 +79,7 @@
   if (inserted) {
     // Successfully added, release the unique_ptr since we no longer have ownership.
     DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get());
-    verified_method.release();
+    verified_method.release();  // NOLINT b/117926937
   } else {
     // TODO: Investigate why are we doing the work again for this method and try to avoid it.
     LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
@@ -97,7 +97,7 @@
   }
 }
 
-const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
+const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) const {
   const VerifiedMethod* ret = nullptr;
   if (atomic_verified_methods_.Get(ref, &ret)) {
     return ret;
@@ -112,12 +112,12 @@
   // which have no verifier error, nor has methods that we know will throw
   // at runtime.
   std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
-      /* encountered_error_types */ 0, /* has_runtime_throw */ false);
+      /* encountered_error_types= */ 0, /* has_runtime_throw= */ false);
   if (atomic_verified_methods_.Insert(ref,
                                       /*expected*/ nullptr,
                                       verified_method.get()) ==
           AtomicMap::InsertResult::kInsertResultSuccess) {
-    verified_method.release();
+    verified_method.release();  // NOLINT b/117926937
   }
 }
 
@@ -129,13 +129,13 @@
   DCHECK(IsClassRejected(ref));
 }
 
-bool VerificationResults::IsClassRejected(ClassReference ref) {
+bool VerificationResults::IsClassRejected(ClassReference ref) const {
   ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_);
   return (rejected_classes_.find(ref) != rejected_classes_.end());
 }
 
 bool VerificationResults::IsCandidateForCompilation(MethodReference&,
-                                                    const uint32_t access_flags) {
+                                                    const uint32_t access_flags) const {
   if (!compiler_options_->IsAotCompilationEnabled()) {
     return false;
   }
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 56f0030..04c4fa6 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -51,13 +51,13 @@
   void CreateVerifiedMethodFor(MethodReference ref)
       REQUIRES(!verified_methods_lock_);
 
-  const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
+  const VerifiedMethod* GetVerifiedMethod(MethodReference ref) const
       REQUIRES(!verified_methods_lock_);
 
   void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
-  bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+  bool IsClassRejected(ClassReference ref) const REQUIRES(!rejected_classes_lock_);
 
-  bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
+  bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags) const;
 
   // Add a dex file to enable using the atomic map.
   void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
@@ -74,10 +74,12 @@
   // GetVerifiedMethod.
   AtomicMap atomic_verified_methods_;
 
-  ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+  mutable ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   // Rejected classes.
-  ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+  mutable ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_);
 
   friend class verifier::VerifierDepsTest;
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index f2da3ff..54f216a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -82,7 +82,7 @@
           method_verifier->ResolveCheckedClass(dex::TypeIndex(inst.VRegB_21c()));
       // Pass null for the method verifier to not record the VerifierDeps dependency
       // if the types are not assignable.
-      if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
+      if (cast_type.IsStrictlyAssignableFrom(reg_type, /* verifier= */ nullptr)) {
         // The types are assignable, we record that dependency in the VerifierDeps so
         // that if this changes after OTA, we will re-verify again.
         // We check if reg_type has a class, as the verifier may have inferred it's
@@ -92,8 +92,8 @@
           verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
                                                            cast_type.GetClass(),
                                                            reg_type.GetClass(),
-                                                           /* strict */ true,
-                                                           /* assignable */ true);
+                                                           /* is_strict= */ true,
+                                                           /* is_assignable= */ true);
         }
         if (safe_cast_set_ == nullptr) {
           safe_cast_set_.reset(new SafeCastSet());
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 5e2f444..05eacd8 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -19,25 +19,13 @@
 #include <gtest/gtest.h>
 
 #include "compiled_method-inl.h"
-#include "compiler_driver.h"
-#include "compiler_options.h"
-#include "dex/verification_results.h"
 
 namespace art {
 
 TEST(CompiledMethodStorage, Deduplicate) {
-  CompilerOptions compiler_options;
-  VerificationResults verification_results(&compiler_options);
-  CompilerDriver driver(&compiler_options,
-                        &verification_results,
-                        Compiler::kOptimizing,
-                        /* image_classes */ nullptr,
-                        /* thread_count */ 1u,
-                        /* swap_fd */ -1,
-                        /* profile_compilation_info */ nullptr);
-  CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
+  CompiledMethodStorage storage(/* swap_fd= */ -1);
 
-  ASSERT_TRUE(storage->DedupeEnabled());  // The default.
+  ASSERT_TRUE(storage.DedupeEnabled());  // The default.
 
   const uint8_t raw_code1[] = { 1u, 2u, 3u };
   const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u };
@@ -77,7 +65,7 @@
       for (auto&& f : cfi_info) {
         for (auto&& p : patches) {
           compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
-              &driver, InstructionSet::kNone, c, v, f, p));
+              &storage, InstructionSet::kNone, c, v, f, p));
         }
       }
     }
@@ -106,7 +94,7 @@
     }
   }
   for (CompiledMethod* method : compiled_methods) {
-    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method);
+    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&storage, method);
   }
 }
 
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 294072d..ec2e38b 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -57,7 +57,7 @@
     const DexCompilationUnit* mUnit) {
   DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
   DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
-  const DexFile::MethodId& referrer_method_id =
+  const dex::MethodId& referrer_method_id =
       mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
   return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
 }
@@ -99,30 +99,6 @@
   return std::make_pair(fast_get, fast_put);
 }
 
-inline ArtMethod* CompilerDriver::ResolveMethod(
-    ScopedObjectAccess& soa,
-    Handle<mirror::DexCache> dex_cache,
-    Handle<mirror::ClassLoader> class_loader,
-    const DexCompilationUnit* mUnit,
-    uint32_t method_idx,
-    InvokeType invoke_type) {
-  DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
-  ArtMethod* resolved_method =
-      mUnit->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
-          method_idx, dex_cache, class_loader, /* referrer */ nullptr, invoke_type);
-  if (UNLIKELY(resolved_method == nullptr)) {
-    DCHECK(soa.Self()->IsExceptionPending());
-    // Clean up any exception left by type resolution.
-    soa.Self()->ClearException();
-  }
-  return resolved_method;
-}
-
-inline VerificationResults* CompilerDriver::GetVerificationResults() const {
-  DCHECK(Runtime::Current()->IsAotCompiler());
-  return verification_results_;
-}
-
 }  // namespace art
 
 #endif  // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f6afe2c..d46cffb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -24,6 +24,7 @@
 #include <malloc.h>  // For mallinfo
 #endif
 
+#include "android-base/logging.h"
 #include "android-base/strings.h"
 
 #include "art_field-inl.h"
@@ -111,19 +112,7 @@
 class CompilerDriver::AOTCompilationStats {
  public:
   AOTCompilationStats()
-      : stats_lock_("AOT compilation statistics lock"),
-        resolved_instance_fields_(0), unresolved_instance_fields_(0),
-        resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
-        type_based_devirtualization_(0),
-        safe_casts_(0), not_safe_casts_(0) {
-    for (size_t i = 0; i <= kMaxInvokeType; i++) {
-      resolved_methods_[i] = 0;
-      unresolved_methods_[i] = 0;
-      virtual_made_direct_[i] = 0;
-      direct_calls_to_boot_[i] = 0;
-      direct_methods_to_boot_[i] = 0;
-    }
-  }
+      : stats_lock_("AOT compilation statistics lock") {}
 
   void Dump() {
     DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
@@ -140,6 +129,16 @@
              type_based_devirtualization_,
              "virtual/interface calls made direct based on type information");
 
+    const size_t total = std::accumulate(
+        class_status_count_,
+        class_status_count_ + static_cast<size_t>(ClassStatus::kLast) + 1,
+        0u);
+    for (size_t i = 0; i <= static_cast<size_t>(ClassStatus::kLast); ++i) {
+      std::ostringstream oss;
+      oss << "classes with status " << static_cast<ClassStatus>(i);
+      DumpStat(class_status_count_[i], total - class_status_count_[i], oss.str().c_str());
+    }
+
     for (size_t i = 0; i <= kMaxInvokeType; i++) {
       std::ostringstream oss;
       oss << static_cast<InvokeType>(i) << " methods were AOT resolved";
@@ -218,61 +217,57 @@
     not_safe_casts_++;
   }
 
+  // Register a class status.
+  void AddClassStatus(ClassStatus status) REQUIRES(!stats_lock_) {
+    STATS_LOCK();
+    ++class_status_count_[static_cast<size_t>(status)];
+  }
+
  private:
   Mutex stats_lock_;
 
-  size_t resolved_instance_fields_;
-  size_t unresolved_instance_fields_;
+  size_t resolved_instance_fields_ = 0u;
+  size_t unresolved_instance_fields_ = 0u;
 
-  size_t resolved_local_static_fields_;
-  size_t resolved_static_fields_;
-  size_t unresolved_static_fields_;
+  size_t resolved_local_static_fields_ = 0u;
+  size_t resolved_static_fields_ = 0u;
+  size_t unresolved_static_fields_ = 0u;
   // Type based devirtualization for invoke interface and virtual.
-  size_t type_based_devirtualization_;
+  size_t type_based_devirtualization_ = 0u;
 
-  size_t resolved_methods_[kMaxInvokeType + 1];
-  size_t unresolved_methods_[kMaxInvokeType + 1];
-  size_t virtual_made_direct_[kMaxInvokeType + 1];
-  size_t direct_calls_to_boot_[kMaxInvokeType + 1];
-  size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+  size_t resolved_methods_[kMaxInvokeType + 1] = {};
+  size_t unresolved_methods_[kMaxInvokeType + 1] = {};
+  size_t virtual_made_direct_[kMaxInvokeType + 1] = {};
+  size_t direct_calls_to_boot_[kMaxInvokeType + 1] = {};
+  size_t direct_methods_to_boot_[kMaxInvokeType + 1] = {};
 
-  size_t safe_casts_;
-  size_t not_safe_casts_;
+  size_t safe_casts_ = 0u;
+  size_t not_safe_casts_ = 0u;
+
+  size_t class_status_count_[static_cast<size_t>(ClassStatus::kLast) + 1] = {};
 
   DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
 };
 
 CompilerDriver::CompilerDriver(
     const CompilerOptions* compiler_options,
-    VerificationResults* verification_results,
     Compiler::Kind compiler_kind,
-    HashSet<std::string>* image_classes,
     size_t thread_count,
-    int swap_fd,
-    const ProfileCompilationInfo* profile_compilation_info)
+    int swap_fd)
     : compiler_options_(compiler_options),
-      verification_results_(verification_results),
       compiler_(Compiler::Create(this, compiler_kind)),
       compiler_kind_(compiler_kind),
-      requires_constructor_barrier_lock_("constructor barrier lock"),
-      image_classes_(std::move(image_classes)),
       number_of_soft_verifier_failures_(0),
       had_hard_verifier_failure_(false),
       parallel_thread_count_(thread_count),
       stats_(new AOTCompilationStats),
-      compiler_context_(nullptr),
       compiled_method_storage_(swap_fd),
-      profile_compilation_info_(profile_compilation_info),
       max_arena_alloc_(0),
       dex_to_dex_compiler_(this) {
   DCHECK(compiler_options_ != nullptr);
 
   compiler_->Init();
 
-  if (GetCompilerOptions().IsBootImage()) {
-    CHECK(image_classes_ != nullptr) << "Expected image classes for boot image";
-  }
-
   compiled_method_storage_.SetDedupeEnabled(compiler_options_->DeduplicateCode());
 }
 
@@ -280,7 +275,7 @@
   compiled_methods_.Visit([this](const DexFileReference& ref ATTRIBUTE_UNUSED,
                                  CompiledMethod* method) {
     if (method != nullptr) {
-      CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, method);
+      CompiledMethod::ReleaseSwapAllocatedCompiledMethod(GetCompiledMethodStorage(), method);
     }
   });
   compiler_->UnInit();
@@ -328,9 +323,8 @@
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
 
-  InitializeThreadPools();
+  CheckThreadPools();
 
-  PreCompile(class_loader, dex_files, timings);
   if (GetCompilerOptions().IsBootImage()) {
     // We don't need to setup the intrinsics for non boot image compilation, as
     // those compilations will pick up a boot image that have the ArtMethod already
@@ -346,13 +340,11 @@
   if (GetCompilerOptions().GetDumpStats()) {
     stats_->Dump();
   }
-
-  FreeThreadPools();
 }
 
 static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
     Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
-    const DexFile& dex_file, const DexFile::ClassDef& class_def)
+    const DexFile& dex_file, const dex::ClassDef& class_def)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // When the dex file is uncompressed in the APK, we do not generate a copy in the .vdex
   // file. As a result, dex2oat will map the dex file read-only, and we only need to check
@@ -397,7 +389,7 @@
     const CompilerDriver& driver,
     jobject jclass_loader,
     const DexFile& dex_file,
-    const DexFile::ClassDef& class_def) {
+    const dex::ClassDef& class_def) {
   ScopedObjectAccess soa(self);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::ClassLoader> class_loader(
@@ -424,7 +416,7 @@
 static void CompileMethodHarness(
     Thread* self,
     CompilerDriver* driver,
-    const DexFile::CodeItem* code_item,
+    const dex::CodeItem* code_item,
     uint32_t access_flags,
     InvokeType invoke_type,
     uint16_t class_def_idx,
@@ -432,7 +424,6 @@
     Handle<mirror::ClassLoader> class_loader,
     const DexFile& dex_file,
     optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
-    bool compilation_enabled,
     Handle<mirror::DexCache> dex_cache,
     CompileFn compile_fn) {
   DCHECK(driver != nullptr);
@@ -450,7 +441,6 @@
                                class_loader,
                                dex_file,
                                dex_to_dex_compilation_level,
-                               compilation_enabled,
                                dex_cache);
 
   if (kTimeCompileMethod) {
@@ -475,7 +465,7 @@
 static void CompileMethodDex2Dex(
     Thread* self,
     CompilerDriver* driver,
-    const DexFile::CodeItem* code_item,
+    const dex::CodeItem* code_item,
     uint32_t access_flags,
     InvokeType invoke_type,
     uint16_t class_def_idx,
@@ -483,11 +473,10 @@
     Handle<mirror::ClassLoader> class_loader,
     const DexFile& dex_file,
     optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
-    bool compilation_enabled,
     Handle<mirror::DexCache> dex_cache) {
   auto dex_2_dex_fn = [](Thread* self ATTRIBUTE_UNUSED,
       CompilerDriver* driver,
-      const DexFile::CodeItem* code_item,
+      const dex::CodeItem* code_item,
       uint32_t access_flags,
       InvokeType invoke_type,
       uint16_t class_def_idx,
@@ -495,7 +484,6 @@
       Handle<mirror::ClassLoader> class_loader,
       const DexFile& dex_file,
       optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
-      bool compilation_enabled ATTRIBUTE_UNUSED,
       Handle<mirror::DexCache> dex_cache ATTRIBUTE_UNUSED) -> CompiledMethod* {
     DCHECK(driver != nullptr);
     MethodReference method_ref(&dex_file, method_idx);
@@ -503,7 +491,7 @@
     optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
 
     if (compiler->ShouldCompileMethod(method_ref)) {
-      VerificationResults* results = driver->GetVerificationResults();
+      const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
       DCHECK(results != nullptr);
       const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
       // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
@@ -532,7 +520,6 @@
                        class_loader,
                        dex_file,
                        dex_to_dex_compilation_level,
-                       compilation_enabled,
                        dex_cache,
                        dex_2_dex_fn);
 }
@@ -540,7 +527,7 @@
 static void CompileMethodQuick(
     Thread* self,
     CompilerDriver* driver,
-    const DexFile::CodeItem* code_item,
+    const dex::CodeItem* code_item,
     uint32_t access_flags,
     InvokeType invoke_type,
     uint16_t class_def_idx,
@@ -548,12 +535,11 @@
     Handle<mirror::ClassLoader> class_loader,
     const DexFile& dex_file,
     optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
-    bool compilation_enabled,
     Handle<mirror::DexCache> dex_cache) {
   auto quick_fn = [](
       Thread* self,
       CompilerDriver* driver,
-      const DexFile::CodeItem* code_item,
+      const dex::CodeItem* code_item,
       uint32_t access_flags,
       InvokeType invoke_type,
       uint16_t class_def_idx,
@@ -561,7 +547,6 @@
       Handle<mirror::ClassLoader> class_loader,
       const DexFile& dex_file,
       optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
-      bool compilation_enabled,
       Handle<mirror::DexCache> dex_cache) {
     DCHECK(driver != nullptr);
     CompiledMethod* compiled_method = nullptr;
@@ -584,10 +569,10 @@
     } else if ((access_flags & kAccAbstract) != 0) {
       // Abstract methods don't have code.
     } else {
-      VerificationResults* results = driver->GetVerificationResults();
+      const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
       DCHECK(results != nullptr);
       const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
-      bool compile = compilation_enabled &&
+      bool compile =
           // Basic checks, e.g., not <clinit>.
           results->IsCandidateForCompilation(method_ref, access_flags) &&
           // Did not fail to create VerifiedMethod metadata.
@@ -609,6 +594,29 @@
                                                          class_loader,
                                                          dex_file,
                                                          dex_cache);
+        ProfileMethodsCheck check_type =
+            driver->GetCompilerOptions().CheckProfiledMethodsCompiled();
+        if (UNLIKELY(check_type != ProfileMethodsCheck::kNone)) {
+          bool violation = driver->ShouldCompileBasedOnProfile(method_ref) &&
+                               (compiled_method == nullptr);
+          if (violation) {
+            std::ostringstream oss;
+            oss << "Failed to compile "
+                << method_ref.dex_file->PrettyMethod(method_ref.index)
+                << "[" << method_ref.dex_file->GetLocation() << "]"
+                << " as expected by profile";
+            switch (check_type) {
+              case ProfileMethodsCheck::kNone:
+                break;
+              case ProfileMethodsCheck::kLog:
+                LOG(ERROR) << oss.str();
+                break;
+              case ProfileMethodsCheck::kAbort:
+                LOG(FATAL_WITHOUT_ABORT) << oss.str();
+                _exit(1);
+            }
+          }
+        }
       }
       if (compiled_method == nullptr &&
           dex_to_dex_compilation_level !=
@@ -630,7 +638,6 @@
                        class_loader,
                        dex_file,
                        dex_to_dex_compilation_level,
-                       compilation_enabled,
                        dex_cache,
                        quick_fn);
 }
@@ -643,7 +650,7 @@
                                 uint32_t method_idx,
                                 uint32_t access_flags,
                                 InvokeType invoke_type,
-                                const DexFile::CodeItem* code_item,
+                                const dex::CodeItem* code_item,
                                 Handle<mirror::DexCache> dex_cache,
                                 Handle<mirror::ClassLoader> h_class_loader) {
   // Can we run DEX-to-DEX compiler on this class ?
@@ -664,7 +671,6 @@
                      h_class_loader,
                      dex_file,
                      dex_to_dex_compilation_level,
-                     true,
                      dex_cache);
 
   const size_t num_methods = dex_to_dex_compiler_.NumCodeItemsToQuicken(self);
@@ -680,7 +686,6 @@
                          h_class_loader,
                          dex_file,
                          dex_to_dex_compilation_level,
-                         true,
                          dex_cache);
     dex_to_dex_compiler_.ClearState();
   }
@@ -708,25 +713,44 @@
   }
 }
 
-static void ResolveConstStrings(CompilerDriver* driver,
-                                const std::vector<const DexFile*>& dex_files,
-                                TimingLogger* timings) {
+void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+                                         bool only_startup_strings,
+                                         TimingLogger* timings) {
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<1> hs(soa.Self());
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
+  size_t num_instructions = 0u;
 
   for (const DexFile* dex_file : dex_files) {
     dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
+    if (only_startup_strings) {
+      // When resolving startup strings, create the preresolved strings array.
+      dex_cache->AddPreResolvedStringsArray();
+    }
     TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
 
+    // TODO: Implement a profile-based filter for the boot image. See b/76145463.
     for (ClassAccessor accessor : dex_file->GetClasses()) {
-      if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
-        // Compilation is skipped, do not resolve const-string in code of this class.
-        // FIXME: Make sure that inlining honors this. b/26687569
-        continue;
-      }
+      const ProfileCompilationInfo* profile_compilation_info =
+          GetCompilerOptions().GetProfileCompilationInfo();
+
+      const bool is_startup_class =
+          profile_compilation_info != nullptr &&
+          profile_compilation_info->ContainsClass(*dex_file, accessor.GetClassIdx());
+
       for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+        const bool is_clinit = (method.GetAccessFlags() & kAccConstructor) != 0 &&
+            (method.GetAccessFlags() & kAccStatic) != 0;
+        const bool is_startup_clinit = is_startup_class && is_clinit;
+
+        if (only_startup_strings &&
+            profile_compilation_info != nullptr &&
+            (!profile_compilation_info->GetMethodHotness(method.GetReference()).IsStartup() &&
+             !is_startup_clinit)) {
+          continue;
+        }
+
         // Resolve const-strings in the code. Done to have deterministic allocation behavior. Right
         // now this is single-threaded for simplicity.
         // TODO: Collect the relevant string indices in parallel, then allocate them sequentially
@@ -740,6 +764,11 @@
                   : inst->VRegB_31c());
               ObjPtr<mirror::String> string = class_linker->ResolveString(string_index, dex_cache);
               CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
+              if (only_startup_strings) {
+                dex_cache->GetPreResolvedStrings()[string_index.index_] =
+                    GcRoot<mirror::String>(string);
+              }
+              ++num_instructions;
               break;
             }
 
@@ -750,6 +779,7 @@
       }
     }
   }
+  VLOG(compiler) << "Resolved " << num_instructions << " const string instructions";
 }
 
 // Initialize type check bit strings for check-cast and instance-of in the code. Done to have
@@ -778,7 +808,7 @@
           ObjPtr<mirror::Class> klass =
               class_linker->LookupResolvedType(type_index,
                                                dex_cache.Get(),
-                                               /* class_loader */ nullptr);
+                                               /* class_loader= */ nullptr);
           CHECK(klass != nullptr) << descriptor << " should have been previously resolved.";
           // Now assign the bitstring if the class is not final. Keep this in sync with sharpening.
           if (!klass->IsFinal()) {
@@ -808,12 +838,6 @@
     TimingLogger::ScopedTiming t("Initialize type check bitstrings", timings);
 
     for (ClassAccessor accessor : dex_file->GetClasses()) {
-      if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
-        // Compilation is skipped, do not look for type checks in code of this class.
-        // FIXME: Make sure that inlining honors this. b/26687569
-        continue;
-      }
-
       // Direct and virtual methods.
       for (const ClassAccessor::Method& method : accessor.GetMethods()) {
         InitializeTypeCheckBitstrings(driver, class_linker, dex_cache, *dex_file, method);
@@ -852,7 +876,9 @@
 
 void CompilerDriver::PreCompile(jobject class_loader,
                                 const std::vector<const DexFile*>& dex_files,
-                                TimingLogger* timings) {
+                                TimingLogger* timings,
+                                /*inout*/ HashSet<std::string>* image_classes,
+                                /*out*/ VerificationResults* verification_results) {
   CheckThreadPools();
 
   VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
@@ -869,7 +895,7 @@
   // 6) Update the set of image classes.
   // 7) For deterministic boot image, initialize bitstrings for type checking.
 
-  LoadImageClasses(timings);
+  LoadImageClasses(timings, image_classes);
   VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
 
   if (compiler_options_->IsAnyCompilationEnabled()) {
@@ -897,11 +923,13 @@
 
   if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
     // Resolve strings from const-string. Do this now to have a deterministic image.
-    ResolveConstStrings(this, dex_files, timings);
+    ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
     VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
+  } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
+    ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
   }
 
-  Verify(class_loader, dex_files, timings);
+  Verify(class_loader, dex_files, timings, verification_results);
   VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
   if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
@@ -926,7 +954,7 @@
     VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
   }
 
-  UpdateImageClasses(timings);
+  UpdateImageClasses(timings, image_classes);
   VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
 
   if (kBitstringSubtypeCheckEnabled &&
@@ -938,13 +966,6 @@
   }
 }
 
-bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
-  if (classes_to_compile_ == nullptr) {
-    return true;
-  }
-  return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
-}
-
 bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
   // Profile compilation info may be null if no profile is passed.
   if (!CompilerFilter::DependsOnProfile(compiler_options_->GetCompilerFilter())) {
@@ -953,12 +974,14 @@
     return true;
   }
   // If we are using a profile filter but do not have a profile compilation info, compile nothing.
-  if (profile_compilation_info_ == nullptr) {
+  const ProfileCompilationInfo* profile_compilation_info =
+      GetCompilerOptions().GetProfileCompilationInfo();
+  if (profile_compilation_info == nullptr) {
     return false;
   }
   // Compile only hot methods, it is the profile saver's job to decide what startup methods to mark
   // as hot.
-  bool result = profile_compilation_info_->GetMethodHotness(method_ref).IsHot();
+  bool result = profile_compilation_info->GetMethodHotness(method_ref).IsHot();
 
   if (kDebugProfileGuidedCompilation) {
     LOG(INFO) << "[ProfileGuidedCompilation] "
@@ -1045,7 +1068,8 @@
 };
 
 // Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings,
+                                      /*inout*/ HashSet<std::string>* image_classes) {
   CHECK(timings != nullptr);
   if (!GetCompilerOptions().IsBootImage()) {
     return;
@@ -1056,15 +1080,15 @@
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  CHECK(image_classes_ != nullptr);
-  for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
+  CHECK(image_classes != nullptr);
+  for (auto it = image_classes->begin(), end = image_classes->end(); it != end;) {
     const std::string& descriptor(*it);
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> klass(
         hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
     if (klass == nullptr) {
       VLOG(compiler) << "Failed to find class " << descriptor;
-      it = image_classes_->erase(it);
+      it = image_classes->erase(it);
       self->ClearException();
     } else {
       ++it;
@@ -1101,7 +1125,7 @@
                                           ScopedNullHandle<mirror::ClassLoader>())
               : nullptr;
       if (klass == nullptr) {
-        const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
+        const dex::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
         const char* descriptor = dex_file->GetTypeDescriptor(type_id);
         LOG(FATAL) << "Failed to resolve class " << descriptor;
       }
@@ -1114,10 +1138,10 @@
   // We walk the roots looking for classes so that we'll pick up the
   // above classes plus any classes them depend on such super
   // classes, interfaces, and the required ClassLinker roots.
-  RecordImageClassesVisitor visitor(image_classes_);
+  RecordImageClassesVisitor visitor(image_classes);
   class_linker->VisitClasses(&visitor);
 
-  CHECK(!image_classes_->empty());
+  CHECK(!image_classes->empty());
 }
 
 static void MaybeAddToImageClasses(Thread* self,
@@ -1146,7 +1170,7 @@
     if (klass->IsArrayClass()) {
       MaybeAddToImageClasses(self, klass->GetComponentType(), image_classes);
     }
-    klass.Assign(klass->GetSuperClass());
+    klass = klass->GetSuperClass();
   }
 }
 
@@ -1173,7 +1197,7 @@
   // Visitor for VisitReferences.
   void operator()(ObjPtr<mirror::Object> object,
                   MemberOffset field_offset,
-                  bool /* is_static */) const
+                  bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
     if (ref != nullptr) {
@@ -1213,8 +1237,15 @@
     bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
       StringPiece name(klass->GetDescriptor(&temp));
-      if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
-        data_->image_classes_.push_back(hs_.NewHandle(klass));
+      auto it = data_->image_class_descriptors_->find(name);
+      if (it != data_->image_class_descriptors_->end()) {
+        if (LIKELY(klass->IsResolved())) {
+          data_->image_classes_.push_back(hs_.NewHandle(klass));
+        } else {
+          DCHECK(klass->IsErroneousUnresolved());
+          VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+          data_->image_class_descriptors_->erase(it);
+        }
       } else {
         // Check whether it is initialized and has a clinit. They must be kept, too.
         if (klass->IsInitialized() && klass->FindClassInitializer(
@@ -1286,7 +1317,8 @@
   DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate);
 };
 
-void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger* timings,
+                                        /*inout*/ HashSet<std::string>* image_classes) {
   if (GetCompilerOptions().IsBootImage()) {
     TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
 
@@ -1298,7 +1330,7 @@
     VariableSizedHandleScope hs(Thread::Current());
     std::string error_msg;
     std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
-                                                                        image_classes_,
+                                                                        image_classes,
                                                                         Thread::Current(),
                                                                         runtime->GetClassLinker()));
 
@@ -1335,7 +1367,7 @@
   Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
   {
     Handle<mirror::ClassLoader> class_loader = mUnit->GetClassLoader();
-    resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static */ false);
+    resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static= */ false);
     referrer_class = resolved_field != nullptr
         ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
   }
@@ -1367,12 +1399,6 @@
   }
 }
 
-const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
-                                                        uint32_t method_idx) const {
-  MethodReference ref(dex_file, method_idx);
-  return verification_results_->GetVerifiedMethod(ref);
-}
-
 bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) {
   if (!compiler_options_->IsVerificationEnabled()) {
     // If we didn't verify, every cast has to be treated as non-safe.
@@ -1551,18 +1577,6 @@
   self->ClearException();
 }
 
-bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
-                                                uint16_t class_def_idx) const {
-  ClassAccessor accessor(dex_file, class_def_idx);
-  // We require a constructor barrier if there are final instance fields.
-  for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
-    if (field.IsFinal()) {
-      return true;
-    }
-  }
-  return false;
-}
-
 class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
  public:
   explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
@@ -1582,7 +1596,7 @@
     // needs it, here we try to resolve fields and methods used in class
     // definitions, since many of them many never be referenced by
     // generated code.
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     ScopedObjectAccess soa(self);
     StackHandleScope<2> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader(
@@ -1606,57 +1620,42 @@
       // We want to resolve the methods and fields eagerly.
       resolve_fields_and_methods = true;
     }
-    // If an instance field is final then we need to have a barrier on the return, static final
-    // fields are assigned within the lock held for class initialization.
-    bool requires_constructor_barrier = false;
 
-    ClassAccessor accessor(dex_file, class_def_index);
-    // Optionally resolve fields and methods and figure out if we need a constructor barrier.
-    auto method_visitor = [&](const ClassAccessor::Method& method)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (resolve_fields_and_methods) {
+    if (resolve_fields_and_methods) {
+      ClassAccessor accessor(dex_file, class_def_index);
+      // Optionally resolve fields and methods and figure out if we need a constructor barrier.
+      auto method_visitor = [&](const ClassAccessor::Method& method)
+          REQUIRES_SHARED(Locks::mutator_lock_) {
         ArtMethod* resolved = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
             method.GetIndex(),
             dex_cache,
             class_loader,
-            /* referrer */ nullptr,
+            /*referrer=*/ nullptr,
             method.GetInvokeType(class_def.access_flags_));
         if (resolved == nullptr) {
           CheckAndClearResolveException(soa.Self());
         }
-      }
-    };
-    accessor.VisitFieldsAndMethods(
-        // static fields
-        [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
-          if (resolve_fields_and_methods) {
+      };
+      accessor.VisitFieldsAndMethods(
+          // static fields
+          [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
             ArtField* resolved = class_linker->ResolveField(
-                field.GetIndex(), dex_cache, class_loader, /* is_static */ true);
+                field.GetIndex(), dex_cache, class_loader, /*is_static=*/ true);
             if (resolved == nullptr) {
               CheckAndClearResolveException(soa.Self());
             }
-          }
-        },
-        // instance fields
-        [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
-          if (field.IsFinal()) {
-            // We require a constructor barrier if there are final instance fields.
-            requires_constructor_barrier = true;
-          }
-          if (resolve_fields_and_methods) {
+          },
+          // instance fields
+          [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
             ArtField* resolved = class_linker->ResolveField(
-                field.GetIndex(), dex_cache, class_loader, /* is_static */ false);
+                field.GetIndex(), dex_cache, class_loader, /*is_static=*/ false);
             if (resolved == nullptr) {
               CheckAndClearResolveException(soa.Self());
             }
-          }
-        },
-        /*direct methods*/ method_visitor,
-        /*virtual methods*/ method_visitor);
-    manager_->GetCompiler()->SetRequiresConstructorBarrier(self,
-                                                           &dex_file,
-                                                           class_def_index,
-                                                           requires_constructor_barrier);
+          },
+          /*direct_method_visitor=*/ method_visitor,
+          /*virtual_method_visitor=*/ method_visitor);
+    }
   }
 
  private:
@@ -1756,6 +1755,9 @@
     if (&cls->GetDexFile() == &accessor.GetDexFile()) {
       ObjectLock<mirror::Class> lock(self, cls);
       mirror::Class::SetStatus(cls, status, self);
+      if (status >= ClassStatus::kVerified) {
+        cls->SetVerificationAttempted();
+      }
     }
   } else {
     DCHECK(self->IsExceptionPending());
@@ -1763,9 +1765,46 @@
   }
 }
 
+// Returns true if any of the given dex files define a class from the boot classpath.
+static bool DexFilesRedefineBootClasses(
+    const std::vector<const DexFile*>& dex_files,
+    TimingLogger* timings) {
+  TimingLogger::ScopedTiming t("Fast Verify: Boot Class Redefinition Check", timings);
+
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+
+  bool foundRedefinition = false;
+  for (const DexFile* dex_file : dex_files) {
+    for (ClassAccessor accessor : dex_file->GetClasses()) {
+      const char* descriptor = accessor.GetDescriptor();
+      StackHandleScope<1> hs_class(self);
+      Handle<mirror::Class> klass =
+          hs_class.NewHandle(class_linker->FindSystemClass(self, descriptor));
+      if (klass == nullptr) {
+        self->ClearException();
+      } else {
+        LOG(WARNING) << "Redefinition of boot class " << descriptor
+            << " App dex file: " <<  accessor.GetDexFile().GetLocation()
+            << " Boot dex file: " << klass->GetDexFile().GetLocation();
+        foundRedefinition = true;
+        if (!VLOG_IS_ON(verifier)) {
+          // If we are not in verbose mode, return early.
+          // Otherwise continue and log all the collisions for easier debugging.
+          return true;
+        }
+      }
+    }
+  }
+
+  return foundRedefinition;
+}
+
 bool CompilerDriver::FastVerify(jobject jclass_loader,
                                 const std::vector<const DexFile*>& dex_files,
-                                TimingLogger* timings) {
+                                TimingLogger* timings,
+                                /*out*/ VerificationResults* verification_results) {
   verifier::VerifierDeps* verifier_deps =
       Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps();
   // If there exist VerifierDeps that aren't the ones we just created to output, use them to verify.
@@ -1773,6 +1812,17 @@
     return false;
   }
   TimingLogger::ScopedTiming t("Fast Verify", timings);
+
+  // We cannot do fast verification if the app redefines classes from the boot classpath.
+  // Vdex does not record resolution chains for boot classes and we might wrongfully
+  // resolve a class to the app when it should have been resolved to the boot classpath
+  // (e.g. if we verified against the SDK and the app redefines a boot class which is not
+  // in the SDK.)
+  if (DexFilesRedefineBootClasses(dex_files, timings)) {
+    LOG(WARNING) << "Found redefinition of boot classes. Not doing fast verification.";
+    return false;
+  }
+
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::ClassLoader> class_loader(
@@ -1813,7 +1863,7 @@
           // - Quickening will not do checkcast ellision.
           // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
           for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-            verification_results_->CreateVerifiedMethodFor(method.GetReference());
+            verification_results->CreateVerifiedMethodFor(method.GetReference());
           }
         }
       } else if (!compiler_only_verifies) {
@@ -1831,8 +1881,9 @@
 
 void CompilerDriver::Verify(jobject jclass_loader,
                             const std::vector<const DexFile*>& dex_files,
-                            TimingLogger* timings) {
-  if (FastVerify(jclass_loader, dex_files, timings)) {
+                            TimingLogger* timings,
+                            /*out*/ VerificationResults* verification_results) {
+  if (FastVerify(jclass_loader, dex_files, timings, verification_results)) {
     return;
   }
 
@@ -1894,7 +1945,7 @@
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     const char* descriptor = dex_file.GetClassDescriptor(class_def);
     ClassLinker* class_linker = manager_->GetClassLinker();
     jobject jclass_loader = manager_->GetClassLoader();
@@ -2028,7 +2079,7 @@
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     const char* descriptor = dex_file.GetClassDescriptor(class_def);
     ClassLinker* class_linker = manager_->GetClassLinker();
     jobject jclass_loader = manager_->GetClassLoader();
@@ -2093,8 +2144,8 @@
     ScopedTrace trace(__FUNCTION__);
     jobject jclass_loader = manager_->GetClassLoader();
     const DexFile& dex_file = *manager_->GetDexFile();
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-    const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
     const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
 
     ScopedObjectAccess soa(Thread::Current());
@@ -2104,8 +2155,11 @@
     Handle<mirror::Class> klass(
         hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
 
-    if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
-      TryInitializeClass(klass, class_loader);
+    if (klass != nullptr) {
+      if (!SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+        TryInitializeClass(klass, class_loader);
+      }
+      manager_->GetCompiler()->stats_->AddClassStatus(klass->GetStatus());
     }
     // Clear any class not found or verification exceptions.
     soa.Self()->ClearException();
@@ -2115,8 +2169,8 @@
   void TryInitializeClass(Handle<mirror::Class> klass, Handle<mirror::ClassLoader>& class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const DexFile& dex_file = klass->GetDexFile();
-    const DexFile::ClassDef* class_def = klass->GetClassDef();
-    const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
+    const dex::ClassDef* class_def = klass->GetClassDef();
+    const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
     const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
     ScopedObjectAccessUnchecked soa(Thread::Current());
     StackHandleScope<3> hs(soa.Self());
@@ -2157,10 +2211,9 @@
         // Otherwise it's in app image but superclasses can't be initialized, no need to proceed.
         old_status = klass->GetStatus();
 
-        bool too_many_encoded_fields = false;
-        if (!is_boot_image && klass->NumStaticFields() > kMaxEncodedFields) {
-          too_many_encoded_fields = true;
-        }
+        bool too_many_encoded_fields = !is_boot_image &&
+            klass->NumStaticFields() > kMaxEncodedFields;
+
         // If the class was not initialized, we can proceed to see if we can initialize static
         // fields. Limit the max number of encoded fields.
         if (!klass->IsInitialized() &&
@@ -2210,9 +2263,13 @@
               if (success) {
                 runtime->ExitTransactionMode();
                 DCHECK(!runtime->IsActiveTransaction());
-              }
 
-              if (!success) {
+                if (is_boot_image) {
+                  // For boot image, we want to put the updated status in the oat class since we
+                  // can't reject the image anyways.
+                  old_status = klass->GetStatus();
+                }
+              } else {
                 CHECK(soa.Self()->IsExceptionPending());
                 mirror::Throwable* exception = soa.Self()->GetException();
                 VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
@@ -2226,10 +2283,6 @@
                 soa.Self()->ClearException();
                 runtime->RollbackAllTransactions();
                 CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
-              } else if (is_boot_image) {
-                // For boot image, we want to put the updated status in the oat class since we can't
-                // reject the image anyways.
-                old_status = klass->GetStatus();
               }
             }
 
@@ -2272,7 +2325,7 @@
 
     StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache = hs.NewHandle(klass->GetDexCache());
-    const DexFile::ClassDef* class_def = klass->GetClassDef();
+    const dex::ClassDef* class_def = klass->GetClassDef();
     ClassLinker* class_linker = manager_->GetClassLinker();
 
     // Check encoded final field values for strings and intern.
@@ -2314,7 +2367,7 @@
       self->ClearException();
       return false;
     }
-    const DexFile::TypeList* types = m->GetParameterTypeList();
+    const dex::TypeList* types = m->GetParameterTypeList();
     if (types != nullptr) {
       for (uint32_t i = 0; i < types->Size(); ++i) {
         dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
@@ -2532,7 +2585,7 @@
     // SetVerificationAttempted so that the access flags are set. If we do not do this they get
     // changed at runtime resulting in more dirty image pages.
     // Also create conflict tables.
-    // Only useful if we are compiling an image (image_classes_ is not null).
+    // Only useful if we are compiling an image.
     ScopedObjectAccess soa(Thread::Current());
     VariableSizedHandleScope hs(soa.Self());
     InitializeArrayClassesAndCreateConflictTablesVisitor visitor(hs);
@@ -2541,7 +2594,7 @@
   }
   if (GetCompilerOptions().IsBootImage()) {
     // Prune garbage objects created during aborted transactions.
-    Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ true);
+    Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ true);
   }
 }
 
@@ -2564,15 +2617,16 @@
                                      thread_pool);
 
   auto compile = [&context, &compile_fn](size_t class_def_index) {
-    ScopedTrace trace(__FUNCTION__);
     const DexFile& dex_file = *context.GetDexFile();
+    SCOPED_TRACE << "compile " << dex_file.GetLocation() << "@" << class_def_index;
     ClassLinker* class_linker = context.GetClassLinker();
     jobject jclass_loader = context.GetClassLoader();
     ClassReference ref(&dex_file, class_def_index);
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     ClassAccessor accessor(dex_file, class_def_index);
+    CompilerDriver* const driver = context.GetCompiler();
     // Skip compiling classes with generic verifier failures since they will still fail at runtime
-    if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
+    if (driver->GetCompilerOptions().GetVerificationResults()->IsClassRejected(ref)) {
       return;
     }
     // Use a scoped object access to perform to the quick SkipClass check.
@@ -2604,15 +2658,10 @@
     // Go to native so that we don't block GC during compilation.
     ScopedThreadSuspension sts(soa.Self(), kNative);
 
-    CompilerDriver* const driver = context.GetCompiler();
-
     // Can we run DEX-to-DEX compiler on this class ?
     optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
         GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
 
-
-    const bool compilation_enabled = driver->IsClassToCompile(accessor.GetDescriptor());
-
     // Compile direct and virtual methods.
     int64_t previous_method_idx = -1;
     for (const ClassAccessor::Method& method : accessor.GetMethods()) {
@@ -2633,7 +2682,6 @@
                  class_loader,
                  dex_file,
                  dex_to_dex_compilation_level,
-                 compilation_enabled,
                  dex_cache);
     }
   };
@@ -2644,10 +2692,12 @@
                              const std::vector<const DexFile*>& dex_files,
                              TimingLogger* timings) {
   if (kDebugProfileGuidedCompilation) {
+    const ProfileCompilationInfo* profile_compilation_info =
+        GetCompilerOptions().GetProfileCompilationInfo();
     LOG(INFO) << "[ProfileGuidedCompilation] " <<
-        ((profile_compilation_info_ == nullptr)
+        ((profile_compilation_info == nullptr)
             ? "null"
-            : profile_compilation_info_->DumpInfo(&dex_files));
+            : profile_compilation_info->DumpInfo(dex_files));
   }
 
   dex_to_dex_compiler_.ClearState();
@@ -2779,56 +2829,6 @@
   return compiled_method;
 }
 
-bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
-                                                     uint16_t class_def_idx,
-                                                     const DexFile& dex_file) const {
-  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
-  if (verified_method != nullptr) {
-    return !verified_method->HasVerificationFailures();
-  }
-
-  // If we can't find verification metadata, check if this is a system class (we trust that system
-  // classes have their methods verified). If it's not, be conservative and assume the method
-  // has not been verified successfully.
-
-  // TODO: When compiling the boot image it should be safe to assume that everything is verified,
-  // even if methods are not found in the verification cache.
-  const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  Thread* self = Thread::Current();
-  ScopedObjectAccess soa(self);
-  bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
-  if (!is_system_class) {
-    self->ClearException();
-  }
-  return is_system_class;
-}
-
-void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
-                                                   const DexFile* dex_file,
-                                                   uint16_t class_def_index,
-                                                   bool requires) {
-  WriterMutexLock mu(self, requires_constructor_barrier_lock_);
-  requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires);
-}
-
-bool CompilerDriver::RequiresConstructorBarrier(Thread* self,
-                                                const DexFile* dex_file,
-                                                uint16_t class_def_index) {
-  ClassReference class_ref(dex_file, class_def_index);
-  {
-    ReaderMutexLock mu(self, requires_constructor_barrier_lock_);
-    auto it = requires_constructor_barrier_.find(class_ref);
-    if (it != requires_constructor_barrier_.end()) {
-      return it->second;
-    }
-  }
-  WriterMutexLock mu(self, requires_constructor_barrier_lock_);
-  const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index);
-  requires_constructor_barrier_.emplace(class_ref, requires);
-  return requires;
-}
-
 std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
   std::ostringstream oss;
   const gc::Heap* const heap = Runtime::Current()->GetHeap();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 343f67c..6f8ec12 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -36,7 +36,6 @@
 #include "class_status.h"
 #include "compiler.h"
 #include "dex/class_reference.h"
-#include "dex/dex_file.h"
 #include "dex/dex_file_types.h"
 #include "dex/dex_to_dex_compiler.h"
 #include "dex/method_reference.h"
@@ -47,6 +46,10 @@
 
 namespace art {
 
+namespace dex {
+struct CodeItem;
+}  // namespace dex
+
 namespace mirror {
 class Class;
 class DexCache;
@@ -62,6 +65,7 @@
 class CompiledMethod;
 class CompilerOptions;
 class DexCompilationUnit;
+class DexFile;
 template<class T> class Handle;
 struct InlineIGetIPutData;
 class InstructionSetFeatures;
@@ -76,7 +80,6 @@
 class TimingLogger;
 class VdexFile;
 class VerificationResults;
-class VerifiedMethod;
 
 enum EntryPointCallingConvention {
   // ABI of invocations to a method's interpreter entry point.
@@ -95,18 +98,26 @@
   // can assume will be in the image, with null implying all available
   // classes.
   CompilerDriver(const CompilerOptions* compiler_options,
-                 VerificationResults* verification_results,
                  Compiler::Kind compiler_kind,
-                 HashSet<std::string>* image_classes,
                  size_t thread_count,
-                 int swap_fd,
-                 const ProfileCompilationInfo* profile_compilation_info);
+                 int swap_fd);
 
   ~CompilerDriver();
 
   // Set dex files classpath.
   void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
 
+  // Initialize and destroy thread pools. This is exposed because we do not want
+  // to do this twice, for PreCompile() and CompileAll().
+  void InitializeThreadPools();
+  void FreeThreadPools();
+
+  void PreCompile(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings,
+                  /*inout*/ HashSet<std::string>* image_classes,
+                  /*out*/ VerificationResults* verification_results)
+      REQUIRES(!Locks::mutator_lock_);
   void CompileAll(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
@@ -120,13 +131,11 @@
                   uint32_t method_idx,
                   uint32_t access_flags,
                   InvokeType invoke_type,
-                  const DexFile::CodeItem* code_item,
+                  const dex::CodeItem* code_item,
                   Handle<mirror::DexCache> dex_cache,
                   Handle<mirror::ClassLoader> h_class_loader)
       REQUIRES(!Locks::mutator_lock_);
 
-  VerificationResults* GetVerificationResults() const;
-
   const CompilerOptions& GetCompilerOptions() const {
     return *compiler_options_;
   }
@@ -146,55 +155,10 @@
   bool GetCompiledClass(const ClassReference& ref, ClassStatus* status) const;
 
   CompiledMethod* GetCompiledMethod(MethodReference ref) const;
-  size_t GetNonRelativeLinkerPatchCount() const;
   // Add a compiled method.
   void AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method);
   CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
 
-  void SetRequiresConstructorBarrier(Thread* self,
-                                     const DexFile* dex_file,
-                                     uint16_t class_def_index,
-                                     bool requires)
-      REQUIRES(!requires_constructor_barrier_lock_);
-
-  // Do the <init> methods for this class require a constructor barrier (prior to the return)?
-  // The answer is "yes", if and only if this class has any instance final fields.
-  // (This must not be called for any non-<init> methods; the answer would be "no").
-  //
-  // ---
-  //
-  // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
-  // of the invoked constructor. The constructor barrier is a conservative implementation means of
-  // enforcing the freezes happen-before the object being constructed is observable by another
-  // thread.
-  //
-  // Note: This question only makes sense for instance constructors;
-  // static constructors (despite possibly having finals) never need
-  // a barrier.
-  //
-  // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
-  // class initialization as:
-  //
-  //   lock(class.lock)
-  //     class.state = initializing
-  //   unlock(class.lock)
-  //
-  //   invoke <clinit>
-  //
-  //   lock(class.lock)
-  //     class.state = initialized
-  //   unlock(class.lock)              <-- acts as a release
-  //
-  // The last operation in the above example acts as an atomic release
-  // for any stores in <clinit>, which ends up being stricter
-  // than what a constructor barrier needs.
-  //
-  // See also QuasiAtomic::ThreadFenceForConstructor().
-  bool RequiresConstructorBarrier(Thread* self,
-                                  const DexFile* dex_file,
-                                  uint16_t class_def_index)
-      REQUIRES(!requires_constructor_barrier_lock_);
-
   // Resolve compiling method's class. Returns null on failure.
   ObjPtr<mirror::Class> ResolveCompilingMethodsClass(const ScopedObjectAccess& soa,
                                                      Handle<mirror::DexCache> dex_cache,
@@ -225,16 +189,6 @@
                                             uint16_t field_idx)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Resolve a method. Returns null on failure, including incompatible class change.
-  ArtMethod* ResolveMethod(
-      ScopedObjectAccess& soa,
-      Handle<mirror::DexCache> dex_cache,
-      Handle<mirror::ClassLoader> class_loader,
-      const DexCompilationUnit* mUnit,
-      uint32_t method_idx,
-      InvokeType invoke_type)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void ProcessedInstanceField(bool resolved);
   void ProcessedStaticField(bool resolved, bool local);
 
@@ -250,17 +204,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
 
-  const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
   bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
 
-  void SetCompilerContext(void* compiler_context) {
-    compiler_context_ = compiler_context;
-  }
-
-  void* GetCompilerContext() const {
-    return compiler_context_;
-  }
-
   size_t GetThreadCount() const {
     return parallel_thread_count_;
   }
@@ -273,9 +218,6 @@
     return compiled_method_storage_.DedupeEnabled();
   }
 
-  // Checks whether the provided class should be compiled, i.e., is in classes_to_compile_.
-  bool IsClassToCompile(const char* descriptor) const;
-
   // Checks whether profile guided compilation is enabled and if the method should be compiled
   // according to the profile file.
   bool ShouldCompileBasedOnProfile(const MethodReference& method_ref) const;
@@ -286,12 +228,6 @@
 
   void RecordClassStatus(const ClassReference& ref, ClassStatus status);
 
-  // Checks if the specified method has been verified without failures. Returns
-  // false if the method is not in the verification results (GetVerificationResults).
-  bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
-                                       uint16_t class_def_idx,
-                                       const DexFile& dex_file) const;
-
   // Get memory usage during compilation.
   std::string GetMemoryUsageString(bool extended) const;
 
@@ -310,10 +246,6 @@
     return &compiled_method_storage_;
   }
 
-  const ProfileCompilationInfo* GetProfileCompilationInfo() const {
-    return profile_compilation_info_;
-  }
-
   // Is `boot_image_filename` the name of a core image (small boot
   // image used for ART testing only)?
   static bool IsCoreImageFilename(const std::string& boot_image_filename) {
@@ -336,13 +268,9 @@
   }
 
  private:
-  void PreCompile(jobject class_loader,
-                  const std::vector<const DexFile*>& dex_files,
-                  TimingLogger* timings)
+  void LoadImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
       REQUIRES(!Locks::mutator_lock_);
 
-  void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
-
   // Attempt to resolve all type, methods, fields, and strings
   // referenced from code in the dex file following PathClassLoader
   // ordering semantics.
@@ -362,11 +290,13 @@
   // verification was successful.
   bool FastVerify(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
-                  TimingLogger* timings);
+                  TimingLogger* timings,
+                  /*out*/ VerificationResults* verification_results);
 
   void Verify(jobject class_loader,
               const std::vector<const DexFile*>& dex_files,
-              TimingLogger* timings);
+              TimingLogger* timings,
+              /*out*/ VerificationResults* verification_results);
 
   void VerifyDexFile(jobject class_loader,
                      const DexFile& dex_file,
@@ -397,30 +327,26 @@
                          TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_);
 
-  void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+  void UpdateImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
+      REQUIRES(!Locks::mutator_lock_);
 
   void Compile(jobject class_loader,
                const std::vector<const DexFile*>& dex_files,
                TimingLogger* timings);
 
-  void InitializeThreadPools();
-  void FreeThreadPools();
   void CheckThreadPools();
 
-  bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const;
+  // Resolve const string literals that are loaded from dex code. If only_startup_strings is
+  // specified, only methods that are marked startup in the profile are resolved.
+  void ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+                           bool only_startup_strings,
+                           /*inout*/ TimingLogger* timings);
 
   const CompilerOptions* const compiler_options_;
-  VerificationResults* const verification_results_;
 
   std::unique_ptr<Compiler> compiler_;
   Compiler::Kind compiler_kind_;
 
-  // All class references that require constructor barriers. If the class reference is not in the
-  // set then the result has not yet been computed.
-  mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::map<ClassReference, bool> requires_constructor_barrier_
-      GUARDED_BY(requires_constructor_barrier_lock_);
-
   // All class references that this compiler has compiled. Indexed by class defs.
   using ClassStateTable = AtomicDexRefMap<ClassReference, ClassStatus>;
   ClassStateTable compiled_classes_;
@@ -429,21 +355,9 @@
 
   typedef AtomicDexRefMap<MethodReference, CompiledMethod*> MethodTable;
 
- private:
   // All method references that this compiler has compiled.
   MethodTable compiled_methods_;
 
-  // Image classes to be updated by PreCompile().
-  // TODO: Remove this member which is a non-const pointer to the CompilerOptions' data.
-  //       Pass this explicitly to the PreCompile() which should be called directly from
-  //       Dex2Oat rather than implicitly by CompileAll().
-  HashSet<std::string>* image_classes_;
-
-  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
-  // all classes are eligible for compilation (duplication filters etc. will still apply).
-  // This option may be restricted to the boot image, depending on a flag in the implementation.
-  std::unique_ptr<HashSet<std::string>> classes_to_compile_;
-
   std::atomic<uint32_t> number_of_soft_verifier_failures_;
 
   bool had_hard_verifier_failure_;
@@ -458,16 +372,8 @@
   class AOTCompilationStats;
   std::unique_ptr<AOTCompilationStats> stats_;
 
-  typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
-  typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
-
-  void* compiler_context_;
-
   CompiledMethodStorage compiled_method_storage_;
 
-  // Info for profile guided compilation.
-  const ProfileCompilationInfo* const profile_compilation_info_;
-
   size_t max_arena_alloc_;
 
   // Compiler for dex to dex (quickening).
@@ -476,6 +382,7 @@
   friend class CommonCompilerTest;
   friend class CompileClassVisitor;
   friend class DexToDexDecompilerTest;
+  friend class InitializeClassVisitor;
   friend class verifier::VerifierDepsTest;
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
 };
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index fe1568d..e73d072 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -42,20 +42,18 @@
 
 class CompilerDriverTest : public CommonCompilerTest {
  protected:
-  void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
-    TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
-    TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+  void CompileAllAndMakeExecutable(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
+    TimingLogger timings("CompilerDriverTest::CompileAllAndMakeExecutable", false, false);
     dex_files_ = GetDexFiles(class_loader);
-    SetDexFilesForOatFile(dex_files_);
-    compiler_driver_->CompileAll(class_loader, dex_files_, &timings);
-    t.NewTiming("MakeAllExecutable");
+    CompileAll(class_loader, dex_files_, &timings);
+    TimingLogger::ScopedTiming t("MakeAllExecutable", &timings);
     MakeAllExecutable(class_loader);
   }
 
   void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
                       const char* signature, bool is_virtual)
       REQUIRES(!Locks::mutator_lock_) {
-    CompileAll(class_loader);
+    CompileAllAndMakeExecutable(class_loader);
     Thread::Current()->TransitionFromSuspendedToRunnable();
     bool started = runtime_->Start();
     CHECK(started);
@@ -82,7 +80,7 @@
   void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) {
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
-      const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+      const dex::ClassDef& class_def = dex_file.GetClassDef(i);
       const char* descriptor = dex_file.GetClassDescriptor(class_def);
       ScopedObjectAccess soa(Thread::Current());
       StackHandleScope<1> hs(soa.Self());
@@ -106,7 +104,7 @@
 // Disabled due to 10 second runtime on host
 // TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
-  CompileAll(nullptr);
+  CompileAllAndMakeExecutable(nullptr);
 
   // All libcore references should resolve
   ScopedObjectAccess soa(Thread::Current());
@@ -266,7 +264,7 @@
     ASSERT_TRUE(dex_file->EnableWrite());
   }
 
-  CompileAll(class_loader);
+  CompileAllAndMakeExecutable(class_loader);
 
   std::unordered_set<std::string> m = GetExpectedMethodsForClass("Main");
   std::unordered_set<std::string> s = GetExpectedMethodsForClass("Second");
@@ -310,7 +308,7 @@
   }
   ASSERT_NE(class_loader, nullptr);
 
-  CompileAll(class_loader);
+  CompileAllAndMakeExecutable(class_loader);
 
   CheckVerifiedClass(class_loader, "LMain;");
   CheckVerifiedClass(class_loader, "LSecond;");
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 8cc6cf1..8d1ae3d 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -24,9 +24,14 @@
 #include "arch/instruction_set_features.h"
 #include "base/runtime_debug.h"
 #include "base/variant_map.h"
+#include "class_linker.h"
 #include "cmdline_parser.h"
 #include "compiler_options_map-inl.h"
+#include "dex/dex_file-inl.h"
+#include "dex/verification_results.h"
+#include "dex/verified_method.h"
 #include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
 #include "simple_compiler_options_map.h"
 
 namespace art {
@@ -44,9 +49,10 @@
       no_inline_from_(),
       dex_files_for_oat_file_(),
       image_classes_(),
-      boot_image_(false),
-      core_image_(false),
-      app_image_(false),
+      verification_results_(nullptr),
+      image_type_(ImageType::kNone),
+      compiling_with_core_image_(false),
+      baseline_(false),
       debuggable_(false),
       generate_debug_info_(kDefaultGenerateDebugInfo),
       generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
@@ -59,6 +65,7 @@
       dump_pass_timings_(false),
       dump_stats_(false),
       top_k_profile_threshold_(kDefaultTopKProfileThreshold),
+      profile_compilation_info_(nullptr),
       verbose_methods_(),
       abort_on_hard_verifier_failure_(false),
       abort_on_soft_verifier_failure_(false),
@@ -68,6 +75,9 @@
       force_determinism_(false),
       deduplicate_code_(true),
       count_hotness_in_compiled_code_(false),
+      resolve_startup_const_strings_(false),
+      check_profiled_methods_(ProfileMethodsCheck::kNone),
+      max_image_block_size_(std::numeric_limits<uint32_t>::max()),
       register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
       passes_to_run_(nullptr) {
 }
@@ -137,4 +147,40 @@
   return image_classes_.find(StringPiece(descriptor)) != image_classes_.end();
 }
 
+const VerificationResults* CompilerOptions::GetVerificationResults() const {
+  DCHECK(Runtime::Current()->IsAotCompiler());
+  return verification_results_;
+}
+
+const VerifiedMethod* CompilerOptions::GetVerifiedMethod(const DexFile* dex_file,
+                                                         uint32_t method_idx) const {
+  MethodReference ref(dex_file, method_idx);
+  return verification_results_->GetVerifiedMethod(ref);
+}
+
+bool CompilerOptions::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                                      uint16_t class_def_idx,
+                                                      const DexFile& dex_file) const {
+  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
+  if (verified_method != nullptr) {
+    return !verified_method->HasVerificationFailures();
+  }
+
+  // If we can't find verification metadata, check if this is a system class (we trust that system
+  // classes have their methods verified). If it's not, be conservative and assume the method
+  // has not been verified successfully.
+
+  // TODO: When compiling the boot image it should be safe to assume that everything is verified,
+  // even if methods are not found in the verification cache.
+  const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
+  if (!is_system_class) {
+    self->ClearException();
+  }
+  return is_system_class;
+}
+
 }  // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 34aceba..bd12bf7 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -39,9 +39,23 @@
 class VerifierDepsTest;
 }  // namespace verifier
 
+namespace linker {
+class Arm64RelativePatcherTest;
+}  // namespace linker
+
 class DexFile;
 enum class InstructionSet;
 class InstructionSetFeatures;
+class ProfileCompilationInfo;
+class VerificationResults;
+class VerifiedMethod;
+
+// Enum for CheckProfileMethodsCompiled. Outside CompilerOptions so it can be forward-declared.
+enum class ProfileMethodsCheck : uint8_t {
+  kNone,
+  kLog,
+  kAbort,
+};
 
 class CompilerOptions final {
  public:
@@ -57,6 +71,12 @@
   static const size_t kDefaultInlineMaxCodeUnits = 32;
   static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
 
+  enum class ImageType : uint8_t {
+    kNone,                // JIT or AOT app compilation producing only an oat file but no image.
+    kBootImage,           // Creating boot image.
+    kAppImage,            // Creating app image.
+  };
+
   CompilerOptions();
   ~CompilerOptions();
 
@@ -190,23 +210,23 @@
 
   // Are we compiling a boot image?
   bool IsBootImage() const {
-    return boot_image_;
+    return image_type_ == ImageType::kBootImage;
   }
 
-  // Are we compiling a core image (small boot image only used for ART testing)?
-  bool IsCoreImage() const {
-    // Ensure that `core_image_` => `boot_image_`.
-    DCHECK(!core_image_ || boot_image_);
-    return core_image_;
+  bool IsBaseline() const {
+    return baseline_;
   }
 
   // Are we compiling an app image?
   bool IsAppImage() const {
-    return app_image_;
+    return image_type_ == ImageType::kAppImage;
   }
 
-  void DisableAppImage() {
-    app_image_ = false;
+  // Returns whether we are compiling against a "core" image, which
+  // is an indicative we are running tests. The compiler will use that
+  // information for checking invariants.
+  bool CompilingWithCoreImage() const {
+    return compiling_with_core_image_;
   }
 
   // Should the code be compiled as position independent?
@@ -214,6 +234,10 @@
     return compile_pic_;
   }
 
+  const ProfileCompilationInfo* GetProfileCompilationInfo() const {
+    return profile_compilation_info_;
+  }
+
   bool HasVerboseMethods() const {
     return !verbose_methods_.empty();
   }
@@ -261,6 +285,16 @@
 
   bool IsImageClass(const char* descriptor) const;
 
+  const VerificationResults* GetVerificationResults() const;
+
+  const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
+
+  // Checks if the specified method has been verified without failures. Returns
+  // false if the method is not in the verification results (GetVerificationResults).
+  bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                       uint16_t class_def_idx,
+                                       const DexFile& dex_file) const;
+
   bool ParseCompilerOptions(const std::vector<std::string>& options,
                             bool ignore_unrecognized,
                             std::string* error_msg);
@@ -309,6 +343,22 @@
     return count_hotness_in_compiled_code_;
   }
 
+  bool ResolveStartupConstStrings() const {
+    return resolve_startup_const_strings_;
+  }
+
+  ProfileMethodsCheck CheckProfiledMethodsCompiled() const {
+    return check_profiled_methods_;
+  }
+
+  uint32_t MaxImageBlockSize() const {
+    return max_image_block_size_;
+  }
+
+  void SetMaxImageBlockSize(uint32_t size) {
+    max_image_block_size_ = size;
+  }
+
  private:
   bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
   void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -343,9 +393,12 @@
   // Must not be empty for real boot image, only for tests pretending to compile boot image.
   HashSet<std::string> image_classes_;
 
-  bool boot_image_;
-  bool core_image_;
-  bool app_image_;
+  // Results of AOT verification.
+  const VerificationResults* verification_results_;
+
+  ImageType image_type_;
+  bool compiling_with_core_image_;
+  bool baseline_;
   bool debuggable_;
   bool generate_debug_info_;
   bool generate_mini_debug_info_;
@@ -361,6 +414,9 @@
   // When using a profile file only the top K% of the profiled samples will be compiled.
   double top_k_profile_threshold_;
 
+  // Info for profile guided compilation.
+  const ProfileCompilationInfo* profile_compilation_info_;
+
   // Vector of methods to have verbose output enabled for.
   std::vector<std::string> verbose_methods_;
 
@@ -387,6 +443,17 @@
   // won't be atomic for performance reasons, so we accept races, just like in interpreter.
   bool count_hotness_in_compiled_code_;
 
+  // Whether we eagerly resolve all of the const strings that are loaded from startup methods in the
+  // profile.
+  bool resolve_startup_const_strings_;
+
+  // When running profile-guided compilation, check that methods intended to be compiled end
+  // up compiled and are not punted.
+  ProfileMethodsCheck check_profiled_methods_;
+
+  // Maximum solid block size in the generated image.
+  uint32_t max_image_block_size_;
+
   RegisterAllocator::Strategy register_allocation_strategy_;
 
   // If not null, specifies optimization passes which will be run instead of defaults.
@@ -402,6 +469,7 @@
   friend class CommonCompilerTest;
   friend class jit::JitCompiler;
   friend class verifier::VerifierDepsTest;
+  friend class linker::Arm64RelativePatcherTest;
 
   template <class Base>
   friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg);
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 32fc887..7e2a64b 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -43,9 +43,6 @@
     }
     options->SetCompilerFilter(compiler_filter);
   }
-  if (map.Exists(Base::PIC)) {
-    options->compile_pic_ = true;
-  }
   map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
   map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
   map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
@@ -58,6 +55,9 @@
   if (map.Exists(Base::Debuggable)) {
     options->debuggable_ = true;
   }
+  if (map.Exists(Base::Baseline)) {
+    options->baseline_ = true;
+  }
   map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
   map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
   map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
@@ -80,6 +80,11 @@
   if (map.Exists(Base::CountHotnessInCompiledCode)) {
     options->count_hotness_in_compiled_code_ = true;
   }
+  map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_);
+  if (map.Exists(Base::CheckProfiledMethods)) {
+    options->check_profiled_methods_ = *map.Get(Base::CheckProfiledMethods);
+  }
+  map.AssignIfExists(Base::MaxImageBlockSize, &options->max_image_block_size_);
 
   if (map.Exists(Base::DumpTimings)) {
     options->dump_timings_ = true;
@@ -106,9 +111,6 @@
           .template WithType<std::string>()
           .IntoKey(Map::CompilerFilter)
 
-      .Define("--compile-pic")
-          .IntoKey(Map::PIC)
-
       .Define("--huge-method-max=_")
           .template WithType<unsigned int>()
           .IntoKey(Map::HugeMethodMaxThreshold)
@@ -147,6 +149,12 @@
       .Define({"--count-hotness-in-compiled-code"})
           .IntoKey(Map::CountHotnessInCompiledCode)
 
+      .Define({"--check-profiled-methods=_"})
+          .template WithType<ProfileMethodsCheck>()
+          .WithValueMap({{"log", ProfileMethodsCheck::kLog},
+                         {"abort", ProfileMethodsCheck::kAbort}})
+          .IntoKey(Map::CheckProfiledMethods)
+
       .Define({"--dump-timings"})
           .IntoKey(Map::DumpTimings)
 
@@ -159,6 +167,9 @@
       .Define("--debuggable")
           .IntoKey(Map::Debuggable)
 
+      .Define("--baseline")
+          .IntoKey(Map::Baseline)
+
       .Define("--top-k-profile-threshold=_")
           .template WithType<double>().WithRange(0.0, 100.0)
           .IntoKey(Map::TopKProfileThreshold)
@@ -184,9 +195,18 @@
           .template WithType<std::string>()
           .IntoKey(Map::RegisterAllocationStrategy)
 
+      .Define("--resolve-startup-const-strings=_")
+          .template WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(Map::ResolveStartupConstStrings)
+
       .Define("--verbose-methods=_")
           .template WithType<ParseStringList<','>>()
-          .IntoKey(Map::VerboseMethods);
+          .IntoKey(Map::VerboseMethods)
+
+      .Define("--max-image-block-size=_")
+          .template WithType<unsigned int>()
+          .IntoKey(Map::MaxImageBlockSize);
 }
 
 #pragma GCC diagnostic pop
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 529d43f..0a9c873 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -48,19 +48,23 @@
 COMPILER_OPTIONS_KEY (bool,                        GenerateMiniDebugInfo)
 COMPILER_OPTIONS_KEY (bool,                        GenerateBuildID)
 COMPILER_OPTIONS_KEY (Unit,                        Debuggable)
+COMPILER_OPTIONS_KEY (Unit,                        Baseline)
 COMPILER_OPTIONS_KEY (double,                      TopKProfileThreshold)
 COMPILER_OPTIONS_KEY (bool,                        AbortOnHardVerifierFailure)
 COMPILER_OPTIONS_KEY (bool,                        AbortOnSoftVerifierFailure)
+COMPILER_OPTIONS_KEY (bool,                        ResolveStartupConstStrings, false)
 COMPILER_OPTIONS_KEY (std::string,                 DumpInitFailures)
 COMPILER_OPTIONS_KEY (std::string,                 DumpCFG)
 COMPILER_OPTIONS_KEY (Unit,                        DumpCFGAppend)
 // TODO: Add type parser.
 COMPILER_OPTIONS_KEY (std::string,                 RegisterAllocationStrategy)
 COMPILER_OPTIONS_KEY (ParseStringList<','>,        VerboseMethods)
-COMPILER_OPTIONS_KEY (bool,                        DeduplicateCode,        true)
+COMPILER_OPTIONS_KEY (bool,                        DeduplicateCode,            true)
 COMPILER_OPTIONS_KEY (Unit,                        CountHotnessInCompiledCode)
+COMPILER_OPTIONS_KEY (ProfileMethodsCheck,         CheckProfiledMethods)
 COMPILER_OPTIONS_KEY (Unit,                        DumpTimings)
 COMPILER_OPTIONS_KEY (Unit,                        DumpPassTimings)
 COMPILER_OPTIONS_KEY (Unit,                        DumpStats)
+COMPILER_OPTIONS_KEY (unsigned int,                MaxImageBlockSize)
 
 #undef COMPILER_OPTIONS_KEY
diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h
index b9bc8b6..af212d6 100644
--- a/compiler/driver/compiler_options_map.h
+++ b/compiler/driver/compiler_options_map.h
@@ -25,6 +25,8 @@
 
 namespace art {
 
+enum class ProfileMethodsCheck : uint8_t;
+
 // Defines a type-safe heterogeneous key->value map. This is to be used as the base for
 // an extended map.
 template <typename Base, template <typename TV> class KeyType>
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index c90c37d..0d0f074 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -16,22 +16,27 @@
 
 #include "dex_compilation_unit.h"
 
+#include "art_field.h"
 #include "base/utils.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/descriptors_names.h"
+#include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
+#include "scoped_thread_state_change-inl.h"
 
 namespace art {
 
 DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
                                        ClassLinker* class_linker,
                                        const DexFile& dex_file,
-                                       const DexFile::CodeItem* code_item,
+                                       const dex::CodeItem* code_item,
                                        uint16_t class_def_idx,
                                        uint32_t method_idx,
                                        uint32_t access_flags,
                                        const VerifiedMethod* verified_method,
-                                       Handle<mirror::DexCache> dex_cache)
+                                       Handle<mirror::DexCache> dex_cache,
+                                       Handle<mirror::Class> compiling_class)
     : class_loader_(class_loader),
       class_linker_(class_linker),
       dex_file_(&dex_file),
@@ -41,7 +46,8 @@
       access_flags_(access_flags),
       verified_method_(verified_method),
       dex_cache_(dex_cache),
-      code_item_accessor_(dex_file, code_item) {}
+      code_item_accessor_(dex_file, code_item),
+      compiling_class_(compiling_class) {}
 
 const std::string& DexCompilationUnit::GetSymbol() {
   if (symbol_.empty()) {
@@ -51,4 +57,32 @@
   return symbol_;
 }
 
+bool DexCompilationUnit::RequiresConstructorBarrier() const {
+  // Constructor barriers are applicable only for <init> methods.
+  DCHECK(!IsStatic());
+  DCHECK(IsConstructor());
+
+  // We require a constructor barrier if there are final instance fields.
+  if (GetCompilingClass().GetReference() != nullptr && !GetCompilingClass().IsNull()) {
+    // Decoding class data can be slow, so iterate over fields of the compiling class if resolved.
+    ScopedObjectAccess soa(Thread::Current());
+    ObjPtr<mirror::Class> compiling_class = GetCompilingClass().Get();
+    for (size_t i = 0, size = compiling_class->NumInstanceFields(); i != size; ++i) {
+      ArtField* field = compiling_class->GetInstanceField(i);
+      if (field->IsFinal()) {
+        return true;
+      }
+    }
+  } else {
+    // Iterate over field definitions in the class data.
+    ClassAccessor accessor(*GetDexFile(), GetClassDefIndex());
+    for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+      if (field.IsFinal()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 }  // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index c1ae3c9..def90fa 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -23,10 +23,10 @@
 #include "dex/code_item_accessors.h"
 #include "dex/dex_file.h"
 #include "handle.h"
-#include "jni.h"
 
 namespace art {
 namespace mirror {
+class Class;
 class ClassLoader;
 class DexCache;
 }  // namespace mirror
@@ -38,12 +38,13 @@
   DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
                      ClassLinker* class_linker,
                      const DexFile& dex_file,
-                     const DexFile::CodeItem* code_item,
+                     const dex::CodeItem* code_item,
                      uint16_t class_def_idx,
                      uint32_t method_idx,
                      uint32_t access_flags,
                      const VerifiedMethod* verified_method,
-                     Handle<mirror::DexCache> dex_cache);
+                     Handle<mirror::DexCache> dex_cache,
+                     Handle<mirror::Class> compiling_class = Handle<mirror::Class>());
 
   Handle<mirror::ClassLoader> GetClassLoader() const {
     return class_loader_;
@@ -65,17 +66,17 @@
     return dex_method_idx_;
   }
 
-  const DexFile::CodeItem* GetCodeItem() const {
+  const dex::CodeItem* GetCodeItem() const {
     return code_item_;
   }
 
   const char* GetShorty() const {
-    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     return dex_file_->GetMethodShorty(method_id);
   }
 
   const char* GetShorty(uint32_t* shorty_len) const {
-    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     return dex_file_->GetMethodShorty(method_id, shorty_len);
   }
 
@@ -117,6 +118,45 @@
     return code_item_accessor_;
   }
 
+  Handle<mirror::Class> GetCompilingClass() const {
+    return compiling_class_;
+  }
+
+  // Does this <init> method require a constructor barrier (prior to the return)?
+  // The answer is "yes", if and only if the class has any instance final fields.
+  // (This must not be called for any non-<init> methods; the answer would be "no").
+  //
+  // ---
+  //
+  // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
+  // of the invoked constructor. The constructor barrier is a conservative implementation means of
+  // enforcing that the freezes happen-before the object being constructed is observable by another
+  // thread.
+  //
+  // Note: This question only makes sense for instance constructors;
+  // static constructors (despite possibly having finals) never need
+  // a barrier.
+  //
+  // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
+  // class initialization as:
+  //
+  //   lock(class.lock)
+  //     class.state = initializing
+  //   unlock(class.lock)
+  //
+  //   invoke <clinit>
+  //
+  //   lock(class.lock)
+  //     class.state = initialized
+  //   unlock(class.lock)              <-- acts as a release
+  //
+  // The last operation in the above example acts as an atomic release
+  // for any stores in <clinit>, which ends up being stricter
+  // than what a constructor barrier needs.
+  //
+  // See also QuasiAtomic::ThreadFenceForConstructor().
+  bool RequiresConstructorBarrier() const;
+
  private:
   const Handle<mirror::ClassLoader> class_loader_;
 
@@ -124,7 +164,7 @@
 
   const DexFile* const dex_file_;
 
-  const DexFile::CodeItem* const code_item_;
+  const dex::CodeItem* const code_item_;
   const uint16_t class_def_idx_;
   const uint32_t dex_method_idx_;
   const uint32_t access_flags_;
@@ -134,6 +174,8 @@
 
   const CodeItemDataAccessor code_item_accessor_;
 
+  Handle<mirror::Class> compiling_class_;
+
   std::string symbol_;
 };
 
diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h
index 3860da9..e7a51a4 100644
--- a/compiler/driver/simple_compiler_options_map.h
+++ b/compiler/driver/simple_compiler_options_map.h
@@ -50,7 +50,7 @@
 
 static inline Parser CreateSimpleParser(bool ignore_unrecognized) {
   std::unique_ptr<Parser::Builder> parser_builder =
-      std::unique_ptr<Parser::Builder>(new Parser::Builder());
+      std::make_unique<Parser::Builder>();
 
   AddCompilerOptionsArgumentParserOptions<SimpleParseArgumentMap>(*parser_builder);
 
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index fd17364..d5ceafe 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -50,7 +50,7 @@
   // which always points to the first source statement.
   static constexpr const uint32_t kDexPc = 0;
 
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
 
     ScopedObjectAccess soa(Thread::Current());
@@ -135,8 +135,8 @@
   ASSERT_EQ(2u, accessor.TriesSize());
   ASSERT_NE(0u, accessor.InsnsSizeInCodeUnits());
 
-  const DexFile::TryItem& t0 = accessor.TryItems().begin()[0];
-  const DexFile::TryItem& t1 = accessor.TryItems().begin()[1];
+  const dex::TryItem& t0 = accessor.TryItems().begin()[0];
+  const dex::TryItem& t1 = accessor.TryItems().begin()[1];
   EXPECT_LE(t0.start_addr_, t1.start_addr_);
   {
     CatchHandlerIterator iter(accessor, 4 /* Dex PC in the first try block */);
@@ -187,14 +187,14 @@
   }
 
   fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
-      method_g_, kDexPc, /* is_catch_handler */ false));  // return pc
+      method_g_, kDexPc, /* is_for_catch_handler= */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method g
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
   fake_stack.push_back(0);
   fake_stack.push_back(0);
   fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
-      method_g_, kDexPc, /* is_catch_handler */ false));  // return pc
+      method_g_, kDexPc, /* is_for_catch_handler= */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method f
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3fc559e..4d7ae9b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -26,7 +26,6 @@
 #include "base/systrace.h"
 #include "base/time_utils.h"
 #include "base/timing_logger.h"
-#include "base/unix_file/fd_file.h"
 #include "debug/elf_debug_writer.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
@@ -34,11 +33,6 @@
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/jit_logger.h"
-#include "oat_file-inl.h"
-#include "oat_quick_method_header.h"
-#include "object_lock.h"
-#include "optimizing/register_allocator.h"
-#include "thread_list.h"
 
 namespace art {
 namespace jit {
@@ -47,54 +41,16 @@
   return new JitCompiler();
 }
 
-extern "C" void* jit_load(bool* generate_debug_info) {
-  VLOG(jit) << "loading jit compiler";
-  auto* const jit_compiler = JitCompiler::Create();
-  CHECK(jit_compiler != nullptr);
-  *generate_debug_info = jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
-  VLOG(jit) << "Done loading jit compiler";
-  return jit_compiler;
-}
-
-extern "C" void jit_unload(void* handle) {
-  DCHECK(handle != nullptr);
-  delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(
-    void* handle, ArtMethod* method, Thread* self, bool osr)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method, osr);
-}
-
-extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
-  if (compiler_options.GetGenerateDebugInfo()) {
-    const ArrayRef<mirror::Class*> types_array(types, count);
-    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
-        kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
-    MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-    // We never free debug info for types, so we don't need to provide a handle
-    // (which would have been otherwise used as identifier to remove it later).
-    AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
-  }
-}
-
-JitCompiler::JitCompiler() {
-  compiler_options_.reset(new CompilerOptions());
+void JitCompiler::ParseCompilerOptions() {
   // Special case max code units for inlining, whose default is "unset" (implictly
   // meaning no limit). Do this before parsing the actual passed options.
   compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+  Runtime* runtime = Runtime::Current();
   {
     std::string error_msg;
-    if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(),
-                                                 true /* ignore_unrecognized */,
-                                                 &error_msg)) {
+    if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(),
+                                                /*ignore_unrecognized=*/ true,
+                                                &error_msg)) {
       LOG(FATAL) << error_msg;
       UNREACHABLE();
     }
@@ -102,8 +58,11 @@
   // JIT is never PIC, no matter what the runtime compiler options specify.
   compiler_options_->SetNonPic();
 
-  // Set debuggability based on the runtime value.
-  compiler_options_->SetDebuggable(Runtime::Current()->IsJavaDebuggable());
+  // If the options don't provide whether we generate debuggable code, set
+  // debuggability based on the runtime value.
+  if (!compiler_options_->GetDebuggable()) {
+    compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+  }
 
   const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
   if (kRuntimeISA == InstructionSet::kArm) {
@@ -112,7 +71,7 @@
     DCHECK_EQ(instruction_set, kRuntimeISA);
   }
   std::unique_ptr<const InstructionSetFeatures> instruction_set_features;
-  for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+  for (const StringPiece option : runtime->GetCompilerOptions()) {
     VLOG(compiler) << "JIT compiler option " << option;
     std::string error_msg;
     if (option.starts_with("--instruction-set-variant=")) {
@@ -140,38 +99,95 @@
       }
     }
   }
+
   if (instruction_set_features == nullptr) {
+    // '--instruction-set-features/--instruction-set-variant' were not used.
+    // Use build-time defined features.
     instruction_set_features = InstructionSetFeatures::FromCppDefines();
   }
   compiler_options_->instruction_set_features_ = std::move(instruction_set_features);
+  compiler_options_->compiling_with_core_image_ =
+      CompilerDriver::IsCoreImageFilename(runtime->GetImageLocation());
 
-  compiler_driver_.reset(new CompilerDriver(
-      compiler_options_.get(),
-      /* verification_results */ nullptr,
-      Compiler::kOptimizing,
-      /* image_classes */ nullptr,
-      /* thread_count */ 1,
-      /* swap_fd */ -1,
-      /* profile_compilation_info */ nullptr));
-  // Disable dedupe so we can remove compiled methods.
-  compiler_driver_->SetDedupeEnabled(false);
-
-  size_t thread_count = compiler_driver_->GetThreadCount();
   if (compiler_options_->GetGenerateDebugInfo()) {
-    DCHECK_EQ(thread_count, 1u)
-        << "Generating debug info only works with one compiler thread";
     jit_logger_.reset(new JitLogger());
     jit_logger_->OpenLog();
   }
 }
 
+extern "C" void* jit_load() {
+  VLOG(jit) << "Create jit compiler";
+  auto* const jit_compiler = JitCompiler::Create();
+  CHECK(jit_compiler != nullptr);
+  VLOG(jit) << "Done creating jit compiler";
+  return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+  DCHECK(handle != nullptr);
+  delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(
+    void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+  DCHECK(jit_compiler != nullptr);
+  return jit_compiler->CompileMethod(self, method, baseline, osr);
+}
+
+extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+  DCHECK(jit_compiler != nullptr);
+  const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+  if (compiler_options.GetGenerateDebugInfo()) {
+    const ArrayRef<mirror::Class*> types_array(types, count);
+    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
+        kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
+    // We never free debug info for types, so we don't need to provide a handle
+    // (which would have been otherwise used as identifier to remove it later).
+    AddNativeDebugInfoForJit(Thread::Current(),
+                             /*code_ptr=*/ nullptr,
+                             elf_file,
+                             debug::PackElfFileForJIT,
+                             compiler_options.GetInstructionSet(),
+                             compiler_options.GetInstructionSetFeatures());
+  }
+}
+
+extern "C" void jit_update_options(void* handle) {
+  JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+  DCHECK(jit_compiler != nullptr);
+  jit_compiler->ParseCompilerOptions();
+}
+
+extern "C" bool jit_generate_debug_info(void* handle) {
+  JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+  DCHECK(jit_compiler != nullptr);
+  return jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
+}
+
+JitCompiler::JitCompiler() {
+  compiler_options_.reset(new CompilerOptions());
+  ParseCompilerOptions();
+
+  compiler_driver_.reset(new CompilerDriver(
+      compiler_options_.get(),
+      Compiler::kOptimizing,
+      /* thread_count= */ 1,
+      /* swap_fd= */ -1));
+  // Disable dedupe so we can remove compiled methods.
+  compiler_driver_->SetDedupeEnabled(false);
+}
+
 JitCompiler::~JitCompiler() {
   if (compiler_options_->GetGenerateDebugInfo()) {
     jit_logger_->CloseLog();
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
   SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
 
   DCHECK(!method->IsProxyMethod());
@@ -188,7 +204,7 @@
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
     success = compiler_driver_->GetCompiler()->JitCompile(
-        self, code_cache, method, osr, jit_logger_.get());
+        self, code_cache, method, baseline, osr, jit_logger_.get());
   }
 
   // Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5840fec..29d2761 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,16 +37,19 @@
   virtual ~JitCompiler();
 
   // Compilation entrypoint. Returns whether the compilation succeeded.
-  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+  bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const CompilerOptions& GetCompilerOptions() const {
     return *compiler_options_.get();
   }
+
   CompilerDriver* GetCompilerDriver() const {
     return compiler_driver_.get();
   }
 
+  void ParseCompilerOptions();
+
  private:
   std::unique_ptr<CompilerOptions> compiler_options_;
   std::unique_ptr<CompilerDriver> compiler_driver_;
@@ -54,11 +57,6 @@
 
   JitCompiler();
 
-  // This is in the compiler since the runtime doesn't have access to the compiled method
-  // structures.
-  bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   DISALLOW_COPY_AND_ASSIGN(JitCompiler);
 };
 
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 920a3a8..b19a2b8 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -86,7 +86,7 @@
                         callee_save_regs, mr_conv->EntrySpills());
     jni_asm->IncreaseFrameSize(32);
     jni_asm->DecreaseFrameSize(32);
-    jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+    jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
     jni_asm->FinalizeCode();
     std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
     MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543..ce987c1 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -659,7 +659,7 @@
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
-                  LoadNativeLibrary(env_, "", class_loader_, &reason))
+                  LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
       << reason;
 
   jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -675,7 +675,7 @@
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
-                  LoadNativeLibrary(env_, "", class_loader_, &reason))
+                  LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
       << reason;
 
   jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
@@ -1300,15 +1300,15 @@
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
-  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
-  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+  EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+  EXPECT_EQ(0x7FEDCBA987654321LL, val2);
   return 42;
 }
 
 void JniCompilerTest::GetTextImpl() {
   SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
                CURRENT_JNI_WRAPPER(my_gettext));
-  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
                                           INT64_C(0x7FEDCBA987654321), jobj_);
   EXPECT_EQ(result, 42);
 }
@@ -2196,7 +2196,7 @@
 // Methods not annotated with anything are not considered "fast native"
 // -- Check that the annotation lookup does not find it.
 void JniCompilerTest::NormalNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                "normalNative",
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
@@ -2218,7 +2218,7 @@
 }
 
 void JniCompilerTest::FastNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                "fastNative",
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
@@ -2241,7 +2241,7 @@
 }
 
 void JniCompilerTest::CriticalNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                // Important: Don't change the "current jni" yet to avoid a method name suffix.
                "criticalNative",
                "()V",
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 54f193b..42a4603 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "handle_scope-inl.h"
 #include "utils/arm/managed_register_arm.h"
@@ -173,7 +174,7 @@
 
 ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
   LOG(FATAL) << "Should not reach here";
-  return ManagedRegister::NoRegister();
+  UNREACHABLE();
 }
 
 FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 328ecbb..4a6a754 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "handle_scope-inl.h"
 #include "utils/arm64/managed_register_arm64.h"
 
@@ -181,7 +182,7 @@
 
 ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
   LOG(FATAL) << "Should not reach here";
-  return ManagedRegister::NoRegister();
+  UNREACHABLE();
 }
 
 FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index ff814c8..f031b9b 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -18,6 +18,8 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
+
 #ifdef ART_ENABLE_CODEGEN_arm
 #include "jni/quick/arm/calling_convention_arm.h"
 #endif
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e256ce6..77a5d59 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -27,6 +27,8 @@
 
 namespace art {
 
+enum class InstructionSet;
+
 // Top-level abstraction for different calling conventions.
 class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConvention> {
  public:
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 62e8e02..bdbf429 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -151,7 +151,7 @@
     // Don't allow both @FastNative and @CriticalNative. They are mutually exclusive.
     if (UNLIKELY(is_fast_native && is_critical_native)) {
       LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative"
-                 << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+                 << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
     }
 
     // @CriticalNative - extra checks:
@@ -162,15 +162,15 @@
       CHECK(is_static)
           << "@CriticalNative functions cannot be virtual since that would"
           << "require passing a reference parameter (this), which is illegal "
-          << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+          << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       CHECK(!is_synchronized)
           << "@CriticalNative functions cannot be synchronized since that would"
           << "require passing a (class and/or this) reference parameter, which is illegal "
-          << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+          << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       for (size_t i = 0; i < strlen(shorty); ++i) {
         CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i]))
             << "@CriticalNative methods' shorty types must not have illegal references "
-            << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+            << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       }
     }
   }
@@ -219,12 +219,6 @@
   jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
   jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
 
-  // Offsets into data structures
-  // TODO: if cross compiling these offsets are for the host not the target
-  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
-  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
-  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
   // 1. Build the frame saving all callee saves, Method*, and PC return address.
   const size_t frame_size(main_jni_conv->FrameSize());  // Excludes outgoing args.
   ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
@@ -638,7 +632,7 @@
   __ DecreaseFrameSize(current_out_arg_size);
 
   // 15. Process pending exceptions from JNI call or monitor exit.
-  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust */);
+  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), /* stack_adjust= */ 0);
 
   // 16. Remove activation - need to restore callee save registers since the GC may have changed
   //     them.
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 5ec1add..c69854d 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "handle_scope-inl.h"
 #include "utils/mips/managed_register_mips.h"
 
@@ -124,7 +125,7 @@
 
 ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
   LOG(FATAL) << "Should not reach here";
-  return ManagedRegister::NoRegister();
+  UNREACHABLE();
 }
 
 FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 165fc60..8b395a0 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -87,7 +87,7 @@
  private:
   // Padding to ensure longs and doubles are not split in o32.
   size_t padding_;
-  size_t use_fp_arg_registers_;
+  bool use_fp_arg_registers_;
 
   DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
 };
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index a7012ae..2c297b3 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "handle_scope-inl.h"
 #include "utils/mips64/managed_register_mips64.h"
 
@@ -109,7 +110,7 @@
 
 ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
   LOG(FATAL) << "Should not reach here";
-  return ManagedRegister::NoRegister();
+  UNREACHABLE();
 }
 
 FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index ad58e38..1f255e2 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "handle_scope-inl.h"
 #include "utils/x86/managed_register_x86.h"
 
@@ -257,7 +258,7 @@
 
 ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
   LOG(FATAL) << "Should not reach here";
-  return ManagedRegister::NoRegister();
+  UNREACHABLE();
 }
 
 FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index e5e96d0..9e77d6b 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "handle_scope-inl.h"
 #include "utils/x86_64/managed_register_x86_64.h"
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 81ecc17..6acce10 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_LINKER_ELF_BUILDER_H_
 
 #include <vector>
+#include <deque>
 
 #include "arch/instruction_set.h"
 #include "arch/mips/instruction_set_features_mips.h"
@@ -281,10 +282,10 @@
                         name,
                         SHT_STRTAB,
                         flags,
-                        /* link */ nullptr,
-                        /* info */ 0,
+                        /* link= */ nullptr,
+                        /* info= */ 0,
                         align,
-                        /* entsize */ 0) { }
+                        /* entsize= */ 0) { }
 
     Elf_Word Add(const std::string& name) {
       if (CachedSection::GetCacheSize() == 0u) {
@@ -305,10 +306,10 @@
                   name,
                   SHT_STRTAB,
                   flags,
-                  /* link */ nullptr,
-                  /* info */ 0,
+                  /* link= */ nullptr,
+                  /* info= */ 0,
                   align,
-                  /* entsize */ 0) {
+                  /* entsize= */ 0) {
       Reset();
     }
 
@@ -350,64 +351,56 @@
                   type,
                   flags,
                   strtab,
-                  /* info */ 1,
+                  /* info= */ 1,
                   sizeof(Elf_Off),
                   sizeof(Elf_Sym)) {
       syms_.push_back(Elf_Sym());  // The symbol table always has to start with NULL symbol.
     }
 
     // Buffer symbol for this section.  It will be written later.
-    // If the symbol's section is null, it will be considered absolute (SHN_ABS).
-    // (we use this in JIT to reference code which is stored outside the debug ELF file)
     void Add(Elf_Word name,
              const Section* section,
              Elf_Addr addr,
              Elf_Word size,
              uint8_t binding,
              uint8_t type) {
-      Elf_Word section_index;
-      if (section != nullptr) {
-        DCHECK_LE(section->GetAddress(), addr);
-        DCHECK_LE(addr, section->GetAddress() + section->header_.sh_size);
-        section_index = section->GetSectionIndex();
-      } else {
-        section_index = static_cast<Elf_Word>(SHN_ABS);
-      }
-      Add(name, section_index, addr, size, binding, type);
-    }
-
-    // Buffer symbol for this section.  It will be written later.
-    void Add(Elf_Word name,
-             Elf_Word section_index,
-             Elf_Addr addr,
-             Elf_Word size,
-             uint8_t binding,
-             uint8_t type) {
       Elf_Sym sym = Elf_Sym();
       sym.st_name = name;
       sym.st_value = addr;
       sym.st_size = size;
       sym.st_other = 0;
-      sym.st_shndx = section_index;
       sym.st_info = (binding << 4) + (type & 0xf);
-      syms_.push_back(sym);
+      Add(sym, section);
+    }
+
+    // Buffer symbol for this section.  It will be written later.
+    void Add(Elf_Sym sym, const Section* section) {
+      DCHECK(section != nullptr);
+      DCHECK_LE(section->GetAddress(), sym.st_value);
+      DCHECK_LE(sym.st_value, section->GetAddress() + section->header_.sh_size);
+      sym.st_shndx = section->GetSectionIndex();
 
       // The sh_info file must be set to index one-past the last local symbol.
-      if (binding == STB_LOCAL) {
-        this->header_.sh_info = syms_.size();
+      if (sym.getBinding() == STB_LOCAL) {
+        DCHECK_EQ(syms_.back().getBinding(), STB_LOCAL);
+        this->header_.sh_info = syms_.size() + 1;
       }
+
+      syms_.push_back(sym);
     }
 
     Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
 
     void WriteCachedSection() {
       this->Start();
-      this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+      for (; !syms_.empty(); syms_.pop_front()) {
+        this->WriteFully(&syms_.front(), sizeof(Elf_Sym));
+      }
       this->End();
     }
 
    private:
-    std::vector<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
+    std::deque<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
   };
 
   class AbiflagsSection final : public Section {
@@ -775,7 +768,7 @@
       // The runtime does not care about the size of this symbol (it uses the "lastword" symbol).
       // We use size 0 (meaning "unknown size" in ELF) to prevent overlap with the debug symbols.
       Elf_Word oatexec = dynstr_.Add("oatexec");
-      dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+      dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
       Elf_Word oatlastword = dynstr_.Add("oatlastword");
       Elf_Word oatlastword_address = text_.GetAddress() + text_size - 4;
       dynsym_.Add(oatlastword, &text_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
@@ -831,7 +824,7 @@
     }
     if (dex_size != 0u) {
       Elf_Word oatdex = dynstr_.Add("oatdex");
-      dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+      dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
       Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
       Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
       dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 5e1615f..f9e3930 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -58,7 +58,7 @@
   static LinkerPatch IntrinsicReferencePatch(size_t literal_offset,
                                              uint32_t pc_insn_offset,
                                              uint32_t intrinsic_data) {
-    LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file= */ nullptr);
     patch.intrinsic_data_ = intrinsic_data;
     patch.pc_insn_offset_ = pc_insn_offset;
     return patch;
@@ -67,7 +67,7 @@
   static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
                                         uint32_t pc_insn_offset,
                                         uint32_t boot_image_offset) {
-    LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file= */ nullptr);
     patch.boot_image_offset_ = boot_image_offset;
     patch.pc_insn_offset_ = pc_insn_offset;
     return patch;
@@ -144,7 +144,9 @@
   static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
                                                  uint32_t custom_value1 = 0u,
                                                  uint32_t custom_value2 = 0u) {
-    LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset,
+                      Type::kBakerReadBarrierBranch,
+                      /* target_dex_file= */ nullptr);
     patch.baker_custom_value1_ = custom_value1;
     patch.baker_custom_value2_ = custom_value2;
     return patch;
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d9df23f..a5f78ca 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -68,7 +68,7 @@
     // places where the program might fall through into/out of the a block and
     // where TryBoundary instructions will be inserted later. Other edges which
     // enter/exit the try blocks are a result of branches/switches.
-    for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+    for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
       uint32_t dex_pc_start = try_item.start_addr_;
       uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_;
       MaybeCreateBlockAt(dex_pc_start);
@@ -222,9 +222,9 @@
 }
 
 // Returns the TryItem stored for `block` or nullptr if there is no info for it.
-static const DexFile::TryItem* GetTryItem(
+static const dex::TryItem* GetTryItem(
     HBasicBlock* block,
-    const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+    const ScopedArenaSafeMap<uint32_t, const dex::TryItem*>& try_block_info) {
   auto iterator = try_block_info.find(block->GetBlockId());
   return (iterator == try_block_info.end()) ? nullptr : iterator->second;
 }
@@ -235,7 +235,7 @@
 // for a handler.
 static void LinkToCatchBlocks(HTryBoundary* try_boundary,
                               const CodeItemDataAccessor& accessor,
-                              const DexFile::TryItem* try_item,
+                              const dex::TryItem* try_item,
                               const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
   for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_));
       it.HasNext();
@@ -279,7 +279,7 @@
 
   // Keep a map of all try blocks and their respective TryItems. We do not use
   // the block's pointer but rather its id to ensure deterministic iteration.
-  ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+  ScopedArenaSafeMap<uint32_t, const dex::TryItem*> try_block_info(
       std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
 
   // Obtain TryItem information for blocks with throwing instructions, and split
@@ -295,7 +295,7 @@
     // loop for synchronized blocks.
     if (ContainsElement(throwing_blocks_, block)) {
       // Try to find a TryItem covering the block.
-      const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
+      const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
       if (try_item != nullptr) {
         // Block throwing and in a TryItem. Store the try block information.
         try_block_info.Put(block->GetBlockId(), try_item);
@@ -315,8 +315,16 @@
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t address = iterator.GetHandlerAddress();
-      if (catch_blocks.find(address) != catch_blocks.end()) {
+      auto existing = catch_blocks.find(address);
+      if (existing != catch_blocks.end()) {
         // Catch block already processed.
+        TryCatchInformation* info = existing->second->GetTryCatchInformation();
+        if (iterator.GetHandlerTypeIndex() != info->GetCatchTypeIndex()) {
+          // The handler is for multiple types. We could record all the types, but
+          // doing class resolution here isn't ideal, and it's unclear whether wasting
+          // the space in TryCatchInformation is worth it.
+          info->SetInvalidTypeIndex();
+        }
         continue;
       }
 
@@ -337,7 +345,7 @@
 
       catch_blocks.Put(address, catch_block);
       catch_block->SetTryCatchInformation(
-        new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+          new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
     }
     handlers_ptr = iterator.EndDataPointer();
   }
@@ -348,7 +356,7 @@
   // that all predecessors are relinked to. This preserves loop headers (b/23895756).
   for (const auto& entry : try_block_info) {
     uint32_t block_id = entry.first;
-    const DexFile::TryItem* try_item = entry.second;
+    const dex::TryItem* try_item = entry.second;
     HBasicBlock* try_block = graph_->GetBlocks()[block_id];
     for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
       if (GetTryItem(predecessor, try_block_info) != try_item) {
@@ -367,7 +375,7 @@
   // the successor is not in the same TryItem.
   for (const auto& entry : try_block_info) {
     uint32_t block_id = entry.first;
-    const DexFile::TryItem* try_item = entry.second;
+    const dex::TryItem* try_item = entry.second;
     HBasicBlock* try_block = graph_->GetBlocks()[block_id];
     // NOTE: Do not use iterators because SplitEdge would invalidate them.
     for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
@@ -415,7 +423,7 @@
   // Create blocks.
   HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
   HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
-  HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u);
+  HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u);
 
   // Add blocks to the graph.
   graph_->AddBlock(entry_block);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 1c3660c..e35d502 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -845,8 +845,10 @@
           // make one more attempt to get a constant in the array range.
           ValueRange* existing_range = LookupValueRange(array_length, block);
           if (existing_range != nullptr &&
-              existing_range->IsConstantValueRange()) {
-            ValueRange constant_array_range(&allocator_, lower, existing_range->GetLower());
+              existing_range->IsConstantValueRange() &&
+              existing_range->GetLower().GetConstant() > 0) {
+            ValueBound constant_upper(nullptr, existing_range->GetLower().GetConstant() - 1);
+            ValueRange constant_array_range(&allocator_, lower, constant_upper);
             if (index_range->FitsIn(&constant_array_range)) {
               ReplaceInstruction(bounds_check, index);
               return;
@@ -1634,7 +1636,7 @@
         HBasicBlock* block = GetPreHeader(loop, check);
         HInstruction* cond =
             new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
-        InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
+        InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true);
         ReplaceInstruction(check, array);
         return true;
       }
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 7c29df8..5927d68 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@
   void RunBCE() {
     graph_->BuildDominatorTree();
 
-    InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+    InstructionSimplifier(graph_, /* codegen= */ nullptr).Run();
 
     SideEffectsAnalysis side_effects(graph_);
     side_effects.Run();
@@ -598,9 +598,10 @@
   entry->AddSuccessor(block);
   // We pass a bogus constant for the class to avoid mocking one.
   HInstruction* new_array = new (allocator) HNewArray(
-      constant_10,
-      constant_10,
-      0);
+      /* cls= */ constant_10,
+      /* length= */ constant_10,
+      /* dex_pc= */ 0,
+      /* component_size_shift= */ 0);
   block->AddInstruction(new_array);
   block->AddInstruction(new (allocator) HGoto());
 
@@ -977,7 +978,11 @@
   graph_->AddBlock(block);
   entry->AddSuccessor(block);
   // We pass a bogus constant for the class to avoid mocking one.
-  HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
+  HInstruction* new_array = new (GetAllocator()) HNewArray(
+      /* cls= */ constant_10,
+      /* length= */ constant_10,
+      /* dex_pc= */ 0,
+      /* component_size_shift= */ 0);
   block->AddInstruction(new_array);
   block->AddInstruction(new (GetAllocator()) HGoto());
 
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a1a5692..64aa1b9 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -21,6 +21,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/logging.h"
 #include "block_builder.h"
+#include "code_generator.h"
 #include "data_type-inl.h"
 #include "dex/verified_method.h"
 #include "driver/compiler_options.h"
@@ -40,7 +41,6 @@
                              const CodeItemDebugInfoAccessor& accessor,
                              const DexCompilationUnit* dex_compilation_unit,
                              const DexCompilationUnit* outer_compilation_unit,
-                             CompilerDriver* driver,
                              CodeGenerator* code_generator,
                              OptimizingCompilerStats* compiler_stats,
                              ArrayRef<const uint8_t> interpreter_metadata,
@@ -50,7 +50,6 @@
       code_item_accessor_(accessor),
       dex_compilation_unit_(dex_compilation_unit),
       outer_compilation_unit_(outer_compilation_unit),
-      compiler_driver_(driver),
       code_generator_(code_generator),
       compilation_stats_(compiler_stats),
       interpreter_metadata_(interpreter_metadata),
@@ -67,19 +66,18 @@
       code_item_accessor_(accessor),
       dex_compilation_unit_(dex_compilation_unit),
       outer_compilation_unit_(nullptr),
-      compiler_driver_(nullptr),
       code_generator_(nullptr),
       compilation_stats_(nullptr),
       handles_(handles),
       return_type_(return_type) {}
 
 bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
-  if (compiler_driver_ == nullptr) {
-    // Note that the compiler driver is null when unit testing.
+  if (code_generator_ == nullptr) {
+    // Note that the codegen is null when unit testing.
     return false;
   }
 
-  const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+  const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
   CompilerFilter::Filter compiler_filter = compiler_options.GetCompilerFilter();
   if (compiler_filter == CompilerFilter::kEverything) {
     return false;
@@ -131,7 +129,6 @@
                                           return_type_,
                                           dex_compilation_unit_,
                                           outer_compilation_unit_,
-                                          compiler_driver_,
                                           code_generator_,
                                           interpreter_metadata_,
                                           compilation_stats_,
@@ -203,7 +200,6 @@
                                           return_type_,
                                           dex_compilation_unit_,
                                           outer_compilation_unit_,
-                                          compiler_driver_,
                                           code_generator_,
                                           interpreter_metadata_,
                                           compilation_stats_,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 5a1914c..6152740 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,6 @@
 #include "dex/code_item_accessors.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file.h"
-#include "driver/compiler_driver.h"
 #include "nodes.h"
 
 namespace art {
@@ -38,7 +37,6 @@
                 const CodeItemDebugInfoAccessor& accessor,
                 const DexCompilationUnit* dex_compilation_unit,
                 const DexCompilationUnit* outer_compilation_unit,
-                CompilerDriver* driver,
                 CodeGenerator* code_generator,
                 OptimizingCompilerStats* compiler_stats,
                 ArrayRef<const uint8_t> interpreter_metadata,
@@ -70,7 +68,6 @@
   // The compilation unit of the enclosing method being compiled.
   const DexCompilationUnit* const outer_compilation_unit_;
 
-  CompilerDriver* const compiler_driver_;
   CodeGenerator* const code_generator_;
 
   OptimizingCompilerStats* const compilation_stats_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e84896b..9e2f5cd 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -197,7 +197,7 @@
     return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
   }
 
-  void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+  void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -230,29 +230,31 @@
 };
 
 void CodeGenerator::CodeGenerationData::EmitJitRoots(
-    Handle<mirror::ObjectArray<mirror::Object>> roots) {
-  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+    /*out*/std::vector<Handle<mirror::Object>>* roots) {
+  DCHECK(roots->empty());
+  roots->reserve(GetNumberOfJitRoots());
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   size_t index = 0;
   for (auto& entry : jit_string_roots_) {
     // Update the `roots` with the string, and replace the address temporarily
     // stored to the index in the table.
     uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
+    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+    DCHECK(roots->back() != nullptr);
+    DCHECK(roots->back()->IsString());
     entry.second = index;
     // Ensure the string is strongly interned. This is a requirement on how the JIT
     // handles strings. b/32995596
-    class_linker->GetInternTable()->InternStrong(
-        reinterpret_cast<mirror::String*>(roots->Get(index)));
+    class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
     ++index;
   }
   for (auto& entry : jit_class_roots_) {
     // Update the `roots` with the class, and replace the address temporarily
     // stored to the index in the table.
     uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
+    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+    DCHECK(roots->back() != nullptr);
+    DCHECK(roots->back()->IsClass());
     entry.second = index;
     ++index;
   }
@@ -412,7 +414,7 @@
     // This ensures that we have correct native line mapping for all native instructions.
     // It is necessary to make stepping over a statement work. Otherwise, any initial
     // instructions (e.g. moves) would be assumed to be the start of next statement.
-    MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
+    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       if (current->HasEnvironment()) {
@@ -985,7 +987,7 @@
 // dex branch instructions.
 static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                             const CodeInfo& code_info,
-                                            const DexFile::CodeItem& code_item) {
+                                            const dex::CodeItem& code_item) {
   if (graph.HasTryCatch()) {
     // One can write loops through try/catch, which we do not support for OSR anyway.
     return;
@@ -1027,7 +1029,7 @@
   }
 }
 
-ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
   ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
   if (kIsDebugBuild && code_item != nullptr) {
     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
@@ -1083,7 +1085,7 @@
     // call). Therefore register_mask contains both callee-save and caller-save
     // registers that hold objects. We must remove the spilled caller-save from the
     // mask, since they will be overwritten by the callee.
-    uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
     register_mask &= ~spills;
   } else {
     // The register mask must be a subset of callee-save registers.
@@ -1124,6 +1126,7 @@
   if (osr) {
     DCHECK_EQ(info->GetSuspendCheck(), instruction);
     DCHECK(info->IsIrreducible());
+    DCHECK(environment != nullptr);
     if (kIsDebugBuild) {
       for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
         HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1161,7 +1164,7 @@
       // Ensure that we do not collide with the stack map of the previous instruction.
       GenerateNop();
     }
-    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
+    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
   }
 }
 
@@ -1179,8 +1182,8 @@
 
     stack_map_stream->BeginStackMapEntry(dex_pc,
                                          native_pc,
-                                         /* register_mask */ 0,
-                                         /* stack_mask */ nullptr,
+                                         /* register_mask= */ 0,
+                                         /* sp_mask= */ nullptr,
                                          StackMap::Kind::Catch);
 
     HInstruction* current_phi = block->GetFirstPhi();
@@ -1552,7 +1555,7 @@
 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -1564,7 +1567,7 @@
     stack_offset += codegen->SaveCoreRegister(stack_offset, i);
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1576,14 +1579,14 @@
 void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
     stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1645,28 +1648,21 @@
 }
 
 void CodeGenerator::EmitJitRoots(uint8_t* code,
-                                 Handle<mirror::ObjectArray<mirror::Object>> roots,
-                                 const uint8_t* roots_data) {
+                                 const uint8_t* roots_data,
+                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
   code_generation_data_->EmitJitRoots(roots);
   EmitJitRootPatches(code, roots_data);
 }
 
-QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
-  ScopedObjectAccess soa(Thread::Current());
-  if (array_klass == nullptr) {
-    // This can only happen for non-primitive arrays, as primitive arrays can always
-    // be resolved.
-    return kQuickAllocArrayResolved32;
-  }
-
-  switch (array_klass->GetComponentSize()) {
-    case 1: return kQuickAllocArrayResolved8;
-    case 2: return kQuickAllocArrayResolved16;
-    case 4: return kQuickAllocArrayResolved32;
-    case 8: return kQuickAllocArrayResolved64;
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+  switch (new_array->GetComponentSizeShift()) {
+    case 0: return kQuickAllocArrayResolved8;
+    case 1: return kQuickAllocArrayResolved16;
+    case 2: return kQuickAllocArrayResolved32;
+    case 3: return kQuickAllocArrayResolved64;
   }
   LOG(FATAL) << "Unreachable";
-  return kQuickAllocArrayResolved;
+  UNREACHABLE();
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e77d621..f70ecb6 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -59,7 +59,6 @@
 
 class Assembler;
 class CodeGenerator;
-class CompilerDriver;
 class CompilerOptions;
 class StackMapStream;
 class ParallelMoveResolver;
@@ -350,14 +349,14 @@
 
   void AddSlowPath(SlowPathCode* slow_path);
 
-  ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
+  ScopedArenaVector<uint8_t> BuildStackMaps(const dex::CodeItem* code_item_for_osr_check);
   size_t GetNumberOfJitRoots() const;
 
   // Fills the `literals` array with literals collected during code generation.
   // Also emits literal patches.
   void EmitJitRoots(uint8_t* code,
-                    Handle<mirror::ObjectArray<mirror::Object>> roots,
-                    const uint8_t* roots_data)
+                    const uint8_t* roots_data,
+                    /*out*/std::vector<Handle<mirror::Object>>* roots)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsLeafMethod() const {
@@ -622,7 +621,7 @@
   // otherwise return a fall-back info that should be used instead.
   virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) = 0;
+      ArtMethod* method) = 0;
 
   // Generate a call to a static or direct method.
   virtual void GenerateStaticOrDirectCall(
@@ -636,7 +635,7 @@
 
   virtual void GenerateNop() = 0;
 
-  static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+  static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
 
  protected:
   // Patch info used for recording locations of required linker patches and their targets,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d56f7aa..ff99a3e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -168,8 +168,8 @@
                                            LocationSummary* locations,
                                            int64_t spill_offset,
                                            bool is_save) {
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
                                          codegen->GetNumberOfCoreRegisters(),
                                          fp_spills,
@@ -212,7 +212,7 @@
 
 void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -224,7 +224,7 @@
     stack_offset += kXRegSizeInBytes;
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -234,13 +234,13 @@
 
   SaveRestoreLiveRegistersHelper(codegen,
                                  locations,
-                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+                                 codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true);
 }
 
 void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   SaveRestoreLiveRegistersHelper(codegen,
                                  locations,
-                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+                                 codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false);
 }
 
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
@@ -885,7 +885,8 @@
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetAllocator(), this),
-      assembler_(graph->GetAllocator()),
+      assembler_(graph->GetAllocator(),
+                 compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()),
       uint32_literals_(std::less<uint32_t>(),
                        graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       uint64_literals_(std::less<uint64_t>(),
@@ -925,7 +926,7 @@
     uint32_t encoded_data = entry.first;
     vixl::aarch64::Label* slow_path_entry = &entry.second.label;
     __ Bind(slow_path_entry);
-    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
   }
 
   // Ensure we emit the literal pool.
@@ -1117,7 +1118,7 @@
     }
   }
 
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateFrameExit() {
@@ -1205,6 +1206,7 @@
   //      mr        : Runtime reserved.
   //      ip1       : VIXL core temp.
   //      ip0       : VIXL core temp.
+  //      x18       : Platform register.
   //
   // Blocked fp registers:
   //      d31       : VIXL fp temp.
@@ -1213,6 +1215,7 @@
   while (!reserved_core_registers.IsEmpty()) {
     blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
   }
+  blocked_core_registers_[X18] = true;
 
   CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
   while (!reserved_fp_registers.IsEmpty()) {
@@ -1885,7 +1888,7 @@
         base,
         offset,
         maybe_temp,
-        /* needs_null_check */ true,
+        /* needs_null_check= */ true,
         field_info.IsVolatile());
   } else {
     // General case.
@@ -1894,7 +1897,7 @@
       // CodeGeneratorARM64::LoadAcquire call.
       // NB: LoadAcquire will record the pc info if needed.
       codegen_->LoadAcquire(
-          instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
+          instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true);
     } else {
       // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
       EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -1949,7 +1952,7 @@
 
     if (field_info.IsVolatile()) {
       codegen_->StoreRelease(
-          instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
+          instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true);
     } else {
       // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
       EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -2317,9 +2320,10 @@
       if (offset >= kReferenceLoadMinFarOffset) {
         locations->AddTemp(FixedTempLocation());
       }
-    } else {
+    } else if (!instruction->GetArray()->IsIntermediateAddress()) {
       // We need a non-scratch temporary for the array data pointer in
-      // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
+      // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier() for the case with no
+      // intermediate address.
       locations->AddTemp(Location::RequiresRegister());
     }
   }
@@ -2349,11 +2353,12 @@
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope temps(masm);
 
-  // The read barrier instrumentation of object ArrayGet instructions
+  // The non-Baker read barrier instrumentation of object ArrayGet instructions
   // does not support the HIntermediateAddress instruction.
   DCHECK(!((type == DataType::Type::kReference) &&
            instruction->GetArray()->IsIntermediateAddress() &&
-           kEmitCompilerReadBarrier));
+           kEmitCompilerReadBarrier &&
+           !kUseBakerReadBarrier));
 
   if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
     // Object ArrayGet with Baker's read barrier case.
@@ -2361,6 +2366,7 @@
     // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
     DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
     if (index.IsConstant()) {
+      DCHECK(!instruction->GetArray()->IsIntermediateAddress());
       // Array load with a constant index can be treated as a field load.
       offset += Int64FromLocation(index) << DataType::SizeShift(type);
       Location maybe_temp =
@@ -2370,12 +2376,11 @@
                                                       obj.W(),
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
-      Register temp = WRegisterFrom(locations->GetTemp(0));
       codegen_->GenerateArrayLoadWithBakerReadBarrier(
-          out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+          instruction, out, obj.W(), offset, index, /* needs_null_check= */ false);
     }
   } else {
     // General case.
@@ -2424,8 +2429,8 @@
         // input instruction has done it already. See the comment in
         // `TryExtractArrayAccessAddress()`.
         if (kIsDebugBuild) {
-          HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
-          DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
+          HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+          DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
         }
         temp = obj;
       } else {
@@ -2537,8 +2542,8 @@
         // input instruction has done it already. See the comment in
         // `TryExtractArrayAccessAddress()`.
         if (kIsDebugBuild) {
-          HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
-          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+          HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+          DCHECK(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
         }
         temp = array;
       } else {
@@ -2920,7 +2925,7 @@
   int64_t magic;
   int shift;
   CalculateMagicAndShiftForDivRem(
-      imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift);
+      imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift);
 
   UseScratchRegisterScope temps(GetVIXLAssembler());
   Register temp = temps.AcquireSameSizeAs(out);
@@ -3042,7 +3047,7 @@
 
   if (!DataType::IsIntegralType(type)) {
     LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
-    return;
+    UNREACHABLE();
   }
 
   if (value.IsConstant()) {
@@ -3111,7 +3116,7 @@
   }
   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
   }
   if (!codegen_->GoesToNextBlock(block, successor)) {
     __ B(codegen_->GetLabelOf(successor));
@@ -3261,7 +3266,7 @@
   if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
     false_target = nullptr;
   }
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -3280,9 +3285,9 @@
   SlowPathCodeARM64* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -3622,7 +3627,7 @@
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -3654,7 +3659,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -3947,7 +3952,7 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4017,7 +4022,7 @@
     codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4053,7 +4058,7 @@
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+      ArtMethod* method ATTRIBUTE_UNUSED) {
   // On ARM64 we support all dispatch types.
   return desired_dispatch_info;
 }
@@ -4196,7 +4201,7 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   codegen_->GenerateInvokePolymorphicCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -4205,21 +4210,21 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
   codegen_->GenerateInvokeCustomCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
     uint32_t intrinsic_data,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4303,7 +4308,7 @@
   ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
-      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
@@ -4311,7 +4316,7 @@
   ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
-      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
 }
 
 void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -4350,7 +4355,7 @@
     // Add ADD with its PC-relative type patch.
     vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
     EmitAddPlaceholder(add_label, reg.X(), reg.X());
-  } else if (Runtime::Current()->IsAotCompiler()) {
+  } else if (GetCompilerOptions().GetCompilePic()) {
     // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
     vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
     EmitAdrpPlaceholder(adrp_label, reg.X());
@@ -4508,7 +4513,7 @@
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
 
@@ -4521,12 +4526,12 @@
         invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
 
@@ -4538,7 +4543,7 @@
     DCHECK(!codegen_->IsLeafMethod());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4606,7 +4611,7 @@
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
   DCHECK(!cls->NeedsAccessCheck());
@@ -4628,7 +4633,7 @@
                                         out_loc,
                                         current_method,
                                         ArtMethod::DeclaringClassOffset().Int32Value(),
-                                        /* fixup_label */ nullptr,
+                                        /* fixup_label= */ nullptr,
                                         read_barrier_option);
       break;
     }
@@ -4691,8 +4696,8 @@
       codegen_->GenerateGcRootFieldLoad(cls,
                                         out_loc,
                                         out.X(),
-                                        /* offset */ 0,
-                                        /* fixup_label */ nullptr,
+                                        /* offset= */ 0,
+                                        /* fixup_label= */ nullptr,
                                         read_barrier_option);
       break;
     }
@@ -4716,7 +4721,7 @@
     } else {
       __ Bind(slow_path->GetExitLabel());
     }
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
   }
 }
 
@@ -4854,7 +4859,7 @@
       codegen_->AddSlowPath(slow_path);
       __ Cbz(out.X(), slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
-      codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+      codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
       return;
     }
     case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -4870,8 +4875,8 @@
       codegen_->GenerateGcRootFieldLoad(load,
                                         out_loc,
                                         out.X(),
-                                        /* offset */ 0,
-                                        /* fixup_label */ nullptr,
+                                        /* offset= */ 0,
+                                        /* fixup_label= */ nullptr,
                                         kCompilerReadBarrierOption);
       return;
     }
@@ -4885,7 +4890,7 @@
   __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4913,7 +4918,7 @@
   } else {
     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   }
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5004,13 +5009,11 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5024,7 +5027,7 @@
 void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
   codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5499,7 +5502,7 @@
     return;
   }
   GenerateSuspendCheck(instruction, nullptr);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -5712,8 +5715,8 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -5753,8 +5756,8 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -5839,7 +5842,7 @@
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
@@ -5928,7 +5931,7 @@
     }
     __ bind(&return_address);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
 }
 
 void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -5957,11 +5960,11 @@
       instruction, ref, obj, src, needs_null_check, use_load_acquire);
 }
 
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+                                                               Location ref,
                                                                Register obj,
                                                                uint32_t data_offset,
                                                                Location index,
-                                                               Register temp,
                                                                bool needs_null_check) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
@@ -6000,9 +6003,24 @@
   DCHECK(temps.IsAvailable(ip0));
   DCHECK(temps.IsAvailable(ip1));
   temps.Exclude(ip0, ip1);
+
+  Register temp;
+  if (instruction->GetArray()->IsIntermediateAddress()) {
+    // We do not need to compute the intermediate address from the array: the
+    // input instruction has done it already. See the comment in
+    // `TryExtractArrayAccessAddress()`.
+    if (kIsDebugBuild) {
+      HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+      DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+    }
+    temp = obj;
+  } else {
+    temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
+    __ Add(temp.X(), obj.X(), Operand(data_offset));
+  }
+
   uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
 
-  __ Add(temp.X(), obj.X(), Operand(data_offset));
   {
     ExactAssemblyScope guard(GetVIXLAssembler(),
                              (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
@@ -6021,7 +6039,7 @@
     }
     __ bind(&return_address);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
 }
 
 void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e7a20b..ada5742 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -557,7 +557,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
@@ -694,11 +694,11 @@
                                              bool use_load_acquire);
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference array load when Baker's read barriers are used.
-  void GenerateArrayLoadWithBakerReadBarrier(Location ref,
+  void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+                                             Location ref,
                                              vixl::aarch64::Register obj,
                                              uint32_t data_offset,
                                              Location index,
-                                             vixl::aarch64::Register temp,
                                              bool needs_null_check);
 
   // Emit code checking the status of the Marking Register, and
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3580975..8204f1e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -319,7 +319,7 @@
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
   size_t orig_offset = stack_offset;
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -334,7 +334,7 @@
   CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
   arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
 
-  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   orig_offset = stack_offset;
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -357,7 +357,7 @@
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
   size_t orig_offset = stack_offset;
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -368,7 +368,7 @@
   CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
   arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
 
-  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   while (fp_spills != 0u) {
     uint32_t begin = CTZ(fp_spills);
     uint32_t tmp = fp_spills + (1u << begin);
@@ -1037,26 +1037,26 @@
 size_t CodeGeneratorARMVIXL::SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
                                               uint32_t reg_id ATTRIBUTE_UNUSED) {
   TODO_VIXL32(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
 // Restores the register from the stack. Returns the size taken on stack.
 size_t CodeGeneratorARMVIXL::RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
                                                  uint32_t reg_id ATTRIBUTE_UNUSED) {
   TODO_VIXL32(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
 size_t CodeGeneratorARMVIXL::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
                                                        uint32_t reg_id ATTRIBUTE_UNUSED) {
   TODO_VIXL32(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
 size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
                                                           uint32_t reg_id ATTRIBUTE_UNUSED) {
   TODO_VIXL32(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
 static void GenerateDataProcInstruction(HInstruction::InstructionKind kind,
@@ -1539,7 +1539,7 @@
     vixl32::Label done_label;
     vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
 
-    __ B(condition.second, final_label, /* far_target */ false);
+    __ B(condition.second, final_label, /* is_far_target= */ false);
     __ Mov(out, 1);
 
     if (done_label.IsReferenced()) {
@@ -1934,7 +1934,7 @@
     uint32_t encoded_data = entry.first;
     vixl::aarch32::Label* slow_path_entry = &entry.second.label;
     __ Bind(slow_path_entry);
-    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
   }
 
   GetAssembler()->FinalizeCode();
@@ -2159,7 +2159,7 @@
     GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
   }
 
-  MaybeGenerateMarkingRegisterCheck(/* code */ 1);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
 }
 
 void CodeGeneratorARMVIXL::GenerateFrameExit() {
@@ -2268,7 +2268,7 @@
     case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
-      break;
+      UNREACHABLE();
   }
   return Location::NoLocation();
 }
@@ -2427,7 +2427,7 @@
   }
   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2);
   }
   if (!codegen_->GoesToNextBlock(block, successor)) {
     __ B(codegen_->GetLabelOf(successor));
@@ -2606,7 +2606,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -2625,9 +2625,9 @@
   SlowPathCodeARMVIXL* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -2677,6 +2677,18 @@
   const Location first = locations->InAt(0);
   const Location out = locations->Out();
   const Location second = locations->InAt(1);
+
+  // In the unlucky case the output of this instruction overlaps
+  // with an input of an "emitted-at-use-site" condition, and
+  // the output of this instruction is not one of its inputs, we'll
+  // need to fallback to branches instead of conditional ARM instructions.
+  bool output_overlaps_with_condition_inputs =
+      !IsBooleanValueOrMaterializedCondition(condition) &&
+      !out.Equals(first) &&
+      !out.Equals(second) &&
+      (condition->GetLocations()->InAt(0).Equals(out) ||
+       condition->GetLocations()->InAt(1).Equals(out));
+  DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
   Location src;
 
   if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@
     return;
   }
 
-  if (!DataType::IsFloatingPointType(type)) {
+  if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
     bool invert = false;
 
     if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@
   vixl32::Label* false_target = nullptr;
   vixl32::Label* true_target = nullptr;
   vixl32::Label select_end;
+  vixl32::Label other_case;
   vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
 
   if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@
     src = second;
 
     if (!out.Equals(first)) {
-      codegen_->MoveLocation(out, first, type);
+      if (output_overlaps_with_condition_inputs) {
+        false_target = &other_case;
+      } else {
+        codegen_->MoveLocation(out, first, type);
+      }
     }
   }
 
-  GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
+  GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false);
   codegen_->MoveLocation(out, src, type);
+  if (output_overlaps_with_condition_inputs) {
+    __ B(target);
+    __ Bind(&other_case);
+    codegen_->MoveLocation(out, first, type);
+  }
 
   if (select_end.IsReferenced()) {
     __ Bind(&select_end);
@@ -2876,31 +2898,16 @@
 void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
-  // Handle the long/FP comparisons made in instruction simplification.
-  switch (cond->InputAt(0)->GetType()) {
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    default:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
+  const DataType::Type type = cond->InputAt(0)->GetType();
+  if (DataType::IsFloatingPointType(type)) {
+    locations->SetInAt(0, Location::RequiresFpuRegister());
+    locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+  } else {
+    locations->SetInAt(0, Location::RequiresRegister());
+    locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+  }
+  if (!cond->IsEmittedAtUseSite()) {
+    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   }
 }
 
@@ -3128,7 +3135,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -3159,7 +3166,7 @@
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4);
     return;
   }
 
@@ -3167,7 +3174,7 @@
   codegen_->GenerateStaticOrDirectCall(
       invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5);
 }
 
 void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3186,14 +3193,14 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6);
     return;
   }
 
   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   DCHECK(!codegen_->IsLeafMethod());
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -3271,7 +3278,7 @@
     DCHECK(!codegen_->IsLeafMethod());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -3280,7 +3287,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   codegen_->GenerateInvokePolymorphicCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -3289,7 +3296,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
   codegen_->GenerateInvokeCustomCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10);
 }
 
 void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
@@ -4006,7 +4013,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
   // TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
   __ Mov(temp1, static_cast<int32_t>(magic));
@@ -4414,7 +4421,7 @@
 
   __ Vcmp(op1, op2);
   __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &nan, /* far_target */ false);  // if un-ordered, go to NaN handling.
+  __ B(vs, &nan, /* is_far_target= */ false);  // if un-ordered, go to NaN handling.
 
   // op1 <> op2
   vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4426,7 +4433,7 @@
     __ vmov(cond, F32, out, op2);
   }
   // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
+  __ B(ne, final_label, /* is_far_target= */ false);
 
   // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
   __ Vmov(temp1, op1);
@@ -4471,7 +4478,7 @@
 
   __ Vcmp(op1, op2);
   __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &handle_nan_eq, /* far_target */ false);  // if un-ordered, go to NaN handling.
+  __ B(vs, &handle_nan_eq, /* is_far_target= */ false);  // if un-ordered, go to NaN handling.
 
   // op1 <> op2
   vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4483,7 +4490,7 @@
     __ vmov(cond, F64, out, op2);
   }
   // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
+  __ B(ne, final_label, /* is_far_target= */ false);
 
   // handle op1 == op2, max(+0.0,-0.0).
   if (!is_min) {
@@ -4707,7 +4714,7 @@
     __ And(shift_right, RegisterFrom(rhs), 0x1F);
     __ Lsrs(shift_left, RegisterFrom(rhs), 6);
     __ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord));
-    __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false);
+    __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false);
 
     // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
     // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
@@ -4964,7 +4971,7 @@
             __ Rrx(o_l, low);
           }
         } else {
-          DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+          DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
           if (op->IsShl()) {
             __ Lsl(o_h, high, shift_value);
             __ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
@@ -5023,7 +5030,7 @@
 void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
   codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
 }
 
 void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5036,14 +5043,12 @@
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
 }
 
 void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5165,8 +5170,8 @@
     }
     case DataType::Type::kInt64: {
       __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right));  // Signed compare.
-      __ B(lt, &less, /* far_target */ false);
-      __ B(gt, &greater, /* far_target */ false);
+      __ B(lt, &less, /* is_far_target= */ false);
+      __ B(gt, &greater, /* is_far_target= */ false);
       // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
       __ Mov(out, 0);
       __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right));  // Unsigned compare.
@@ -5187,8 +5192,8 @@
       UNREACHABLE();
   }
 
-  __ B(eq, final_label, /* far_target */ false);
-  __ B(less_cond, &less, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
+  __ B(less_cond, &less, /* is_far_target= */ false);
 
   __ Bind(&greater);
   __ Mov(out, 1);
@@ -5603,7 +5608,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, maybe_temp, /* needs_null_check */ true);
+            instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5959,7 +5964,7 @@
           __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
           static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                         "Expecting 0=compressed, 1=uncompressed");
-          __ B(cs, &uncompressed_load, /* far_target */ false);
+          __ B(cs, &uncompressed_load, /* is_far_target= */ false);
           GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
                                          RegisterFrom(out_loc),
                                          obj,
@@ -6001,7 +6006,7 @@
           __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
           static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                         "Expecting 0=compressed, 1=uncompressed");
-          __ B(cs, &uncompressed_load, /* far_target */ false);
+          __ B(cs, &uncompressed_load, /* is_far_target= */ false);
           __ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
           __ B(final_label);
           __ Bind(&uncompressed_load);
@@ -6041,11 +6046,11 @@
                                                           obj,
                                                           data_offset,
                                                           maybe_temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           Location temp = locations->GetTemp(0);
           codegen_->GenerateArrayLoadWithBakerReadBarrier(
-              out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+              out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false);
         }
       } else {
         vixl32::Register out = OutputRegister(instruction);
@@ -6320,7 +6325,7 @@
 
         if (instruction->StaticTypeOfArrayIsObjectArray()) {
           vixl32::Label do_put;
-          __ B(eq, &do_put, /* far_target */ false);
+          __ B(eq, &do_put, /* is_far_target= */ false);
           // If heap poisoning is enabled, the `temp1` reference has
           // not been unpoisoned yet; unpoison it now.
           GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -6622,7 +6627,7 @@
     return;
   }
   GenerateSuspendCheck(instruction, nullptr);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13);
 }
 
 void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -6970,7 +6975,7 @@
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14);
     return;
   }
   DCHECK(!cls->NeedsAccessCheck());
@@ -7009,14 +7014,14 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      __ Ldr(out, MemOperand(out, /* offset */ 0));
+      __ Ldr(out, MemOperand(out, /* offset= */ 0));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
       generate_null_check = true;
       break;
     }
@@ -7032,7 +7037,7 @@
                                                        cls->GetTypeIndex(),
                                                        cls->GetClass()));
       // /* GcRoot<mirror::Class> */ out = *out
-      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kRuntimeCall:
@@ -7054,7 +7059,7 @@
     } else {
       __ Bind(slow_path->GetExitLabel());
     }
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15);
   }
 }
 
@@ -7235,7 +7240,7 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      __ Ldr(out, MemOperand(out, /* offset */ 0));
+      __ Ldr(out, MemOperand(out, /* offset= */ 0));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -7244,13 +7249,13 @@
           codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
       codegen_->GenerateGcRootFieldLoad(
-          load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+          load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
       LoadStringSlowPathARMVIXL* slow_path =
           new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
       codegen_->AddSlowPath(slow_path);
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
-      codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+      codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16);
       return;
     }
     case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -7265,7 +7270,7 @@
                                                         load->GetString()));
       // /* GcRoot<mirror::String> */ out = *out
       codegen_->GenerateGcRootFieldLoad(
-          load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+          load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
       return;
     }
     default:
@@ -7278,7 +7283,7 @@
   __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17);
 }
 
 static int32_t GetExceptionTlsOffset() {
@@ -7410,7 +7415,7 @@
   if (instruction->MustDoNullCheck()) {
     DCHECK(!out.Is(obj));
     __ Mov(out, 0);
-    __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+    __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
   }
 
   switch (type_check_kind) {
@@ -7442,7 +7447,7 @@
         __ it(eq);
         __ mov(eq, out, 1);
       } else {
-        __ B(ne, final_label, /* far_target */ false);
+        __ B(ne, final_label, /* is_far_target= */ false);
         __ Mov(out, 1);
       }
 
@@ -7470,9 +7475,9 @@
                                        maybe_temp_loc,
                                        read_barrier_option);
       // If `out` is null, we use it for the result, and jump to the final label.
-      __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+      __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
       __ Cmp(out, cls);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
       __ Mov(out, 1);
       break;
     }
@@ -7491,7 +7496,7 @@
       vixl32::Label loop, success;
       __ Bind(&loop);
       __ Cmp(out, cls);
-      __ B(eq, &success, /* far_target */ false);
+      __ B(eq, &success, /* is_far_target= */ false);
       // /* HeapReference<Class> */ out = out->super_class_
       GenerateReferenceLoadOneRegister(instruction,
                                        out_loc,
@@ -7501,7 +7506,7 @@
       // This is essentially a null check, but it sets the condition flags to the
       // proper value for the code that follows the loop, i.e. not `eq`.
       __ Cmp(out, 1);
-      __ B(hs, &loop, /* far_target */ false);
+      __ B(hs, &loop, /* is_far_target= */ false);
 
       // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
       // we check that the output is in a low register, so that a 16-bit MOV
@@ -7546,7 +7551,7 @@
       // Do an exact check.
       vixl32::Label exact_check;
       __ Cmp(out, cls);
-      __ B(eq, &exact_check, /* far_target */ false);
+      __ B(eq, &exact_check, /* is_far_target= */ false);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
       GenerateReferenceLoadOneRegister(instruction,
@@ -7555,7 +7560,7 @@
                                        maybe_temp_loc,
                                        read_barrier_option);
       // If `out` is null, we use it for the result, and jump to the final label.
-      __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+      __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
       GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
       __ Cmp(out, 0);
@@ -7577,7 +7582,7 @@
         __ it(eq);
         __ mov(eq, out, 1);
       } else {
-        __ B(ne, final_label, /* far_target */ false);
+        __ B(ne, final_label, /* is_far_target= */ false);
         __ Bind(&exact_check);
         __ Mov(out, 1);
       }
@@ -7597,7 +7602,7 @@
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -7626,7 +7631,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7711,7 +7716,7 @@
   vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
   // Avoid null check if we know obj is not null.
   if (instruction->MustDoNullCheck()) {
-    __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+    __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
   }
 
   switch (type_check_kind) {
@@ -7758,7 +7763,7 @@
 
       // Otherwise, compare the classes.
       __ Cmp(temp, cls);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
       break;
     }
 
@@ -7775,7 +7780,7 @@
       vixl32::Label loop;
       __ Bind(&loop);
       __ Cmp(temp, cls);
-      __ B(eq, final_label, /* far_target */ false);
+      __ B(eq, final_label, /* is_far_target= */ false);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
       GenerateReferenceLoadOneRegister(instruction,
@@ -7803,7 +7808,7 @@
 
       // Do an exact check.
       __ Cmp(temp, cls);
-      __ B(eq, final_label, /* far_target */ false);
+      __ B(eq, final_label, /* is_far_target= */ false);
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
@@ -7867,7 +7872,7 @@
       __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
       // Compare the classes and continue the loop if they do not match.
       __ Cmp(cls, RegisterFrom(maybe_temp3_loc));
-      __ B(ne, &start_loop, /* far_target */ false);
+      __ B(ne, &start_loop, /* is_far_target= */ false);
       break;
     }
 
@@ -7908,7 +7913,7 @@
   } else {
     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   }
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18);
 }
 
 void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8263,7 +8268,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -8298,7 +8303,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -8379,7 +8384,7 @@
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 19);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 19);
 }
 
 void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
@@ -8479,7 +8484,7 @@
               narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
                      : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip));
 }
 
 void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8567,7 +8572,7 @@
     DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
               BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip));
 }
 
 void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -8650,7 +8655,7 @@
 // otherwise return a fall-back info that should be used instead.
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
     const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-    HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+    ArtMethod* method ATTRIBUTE_UNUSED) {
   return desired_dispatch_info;
 }
 
@@ -8810,12 +8815,12 @@
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
     uint32_t intrinsic_data) {
-  return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+  return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
     uint32_t boot_image_offset) {
-  return NewPcRelativePatch(/* dex_file */ nullptr,
+  return NewPcRelativePatch(/* dex_file= */ nullptr,
                             boot_image_offset,
                             &boot_image_method_patches_);
 }
@@ -8886,7 +8891,7 @@
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
       });
 }
 
@@ -8897,7 +8902,7 @@
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
       });
 }
 
@@ -8907,11 +8912,11 @@
     CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
         NewBootImageIntrinsicPatch(boot_image_reference);
     EmitMovwMovtPlaceholder(labels, reg);
-  } else if (Runtime::Current()->IsAotCompiler()) {
+  } else if (GetCompilerOptions().GetCompilePic()) {
     CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
         NewBootImageRelRoPatch(boot_image_reference);
     EmitMovwMovtPlaceholder(labels, reg);
-    __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+    __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9056,7 +9061,7 @@
   return map->GetOrCreate(
       value,
       [this, value]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value);
       });
 }
 
@@ -9283,9 +9288,9 @@
                          CodeBufferCheckScope::kMaximumSize);
   // TODO(VIXL): Think about using mov instead of movw.
   __ bind(&labels->movw_label);
-  __ movw(out, /* placeholder */ 0u);
+  __ movw(out, /* operand= */ 0u);
   __ bind(&labels->movt_label);
-  __ movt(out, /* placeholder */ 0u);
+  __ movt(out, /* operand= */ 0u);
   __ bind(&labels->add_pc_label);
   __ add(out, out, pc);
 }
@@ -9308,7 +9313,7 @@
   static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
-  __ B(ne, slow_path, /* is_far_target */ false);
+  __ B(ne, slow_path, /* is_far_target= */ false);
   // To throw NPE, we return to the fast path; the artificial dependence below does not matter.
   if (throw_npe != nullptr) {
     __ Bind(throw_npe);
@@ -9355,7 +9360,7 @@
       vixl32::Label* throw_npe = nullptr;
       if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
         throw_npe = &throw_npe_label;
-        __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false);
+        __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false);
       }
       // Check if the holder is gray and, if not, add fake dependency to the base register
       // and return to the LDR instruction to load the reference. Otherwise, use introspection
@@ -9432,7 +9437,7 @@
       UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
       temps.Exclude(ip);
       vixl32::Label return_label, not_marked, forwarding_address;
-      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false);
       MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
       __ Ldr(ip, lock_word);
       __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 33502d4..5edca87 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -547,7 +547,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d74a7a7..f7f37db 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@
     case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
-      break;
+      UNREACHABLE();
   }
 
   // Space on the stack is reserved for all arguments.
@@ -587,7 +587,7 @@
       mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                         instruction_,
                                                         this,
-                                                        /* direct */ false);
+                                                        /* direct= */ false);
     }
     __ B(GetExitLabel());
   }
@@ -681,7 +681,7 @@
     mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                       instruction_,
                                                       this,
-                                                      /* direct */ false);
+                                                      /* direct= */ false);
 
     // If the new reference is different from the old reference,
     // update the field in the holder (`*(obj_ + field_offset_)`).
@@ -1167,9 +1167,9 @@
     __ Move(r2_l, TMP);
     __ Move(r2_h, AT);
   } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
   } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
   } else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
     ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
   } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
@@ -1654,14 +1654,14 @@
     uint32_t intrinsic_data,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
 }
 
 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
@@ -1737,7 +1737,7 @@
     __ Bind(&info_high->label);
     __ Bind(&info_high->pc_rel_label);
     // Add the high half of a 32-bit offset to PC.
-    __ Auipc(out, /* placeholder */ 0x1234);
+    __ Auipc(out, /* imm16= */ 0x1234);
     __ SetReorder(reordering);
   } else {
     // If base is ZERO, emit NAL to obtain the actual base.
@@ -1746,7 +1746,7 @@
       __ Nal();
     }
     __ Bind(&info_high->label);
-    __ Lui(out, /* placeholder */ 0x1234);
+    __ Lui(out, /* imm16= */ 0x1234);
     // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
     // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
     if (base == ZERO) {
@@ -1764,13 +1764,13 @@
   if (GetCompilerOptions().IsBootImage()) {
     PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
-    __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
-  } else if (Runtime::Current()->IsAotCompiler()) {
+    EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
+    __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
+  } else if (GetCompilerOptions().GetCompilePic()) {
     PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
-    __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
+    __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1793,8 +1793,8 @@
     PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
     PcRelativePatchInfo* info_low =
         NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
-    __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
+    __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
   } else {
     LoadBootImageAddress(argument, boot_image_offset);
   }
@@ -2579,7 +2579,7 @@
           __ Or(dst_high, dst_high, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_high, dst_low);
             __ Move(dst_low, ZERO);
           } else {
@@ -2595,7 +2595,7 @@
           __ Or(dst_low, dst_low, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_low, dst_high);
             __ Sra(dst_high, dst_high, 31);
           } else {
@@ -2612,7 +2612,7 @@
           __ Or(dst_low, dst_low, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_low, dst_high);
             __ Move(dst_high, ZERO);
           } else {
@@ -2631,7 +2631,7 @@
           __ Or(dst_high, dst_high, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(TMP, dst_high);
             __ Move(dst_high, dst_low);
             __ Move(dst_low, TMP);
@@ -2862,7 +2862,7 @@
                                                           obj,
                                                           offset,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
                                                           out_loc,
@@ -2870,7 +2870,7 @@
                                                           data_offset,
                                                           index,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         }
       } else {
         Register out = out_loc.AsRegister<Register>();
@@ -4104,7 +4104,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift);
 
   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
 
@@ -5948,7 +5948,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -5967,9 +5967,9 @@
   SlowPathCodeMIPS* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 // This function returns true if a conditional move can be generated for HSelect.
@@ -5983,7 +5983,7 @@
 // of common logic.
 static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
   bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   HCondition* condition = cond->AsCondition();
 
   DataType::Type cond_type =
@@ -6216,7 +6216,7 @@
   Location src = locations->InAt(1);
   Register src_reg = ZERO;
   Register src_reg_high = ZERO;
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   Register cond_reg = TMP;
   int cond_cc = 0;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -6224,7 +6224,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -6337,7 +6337,7 @@
   Location dst = locations->Out();
   Location false_src = locations->InAt(0);
   Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   Register cond_reg = TMP;
   FRegister fcond_reg = FTMP;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -6345,7 +6345,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -6526,7 +6526,7 @@
 
 void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
   bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
-  if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+  if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
     if (is_r6) {
       GenConditionalMoveR6(select);
     } else {
@@ -6536,8 +6536,8 @@
     LocationSummary* locations = select->GetLocations();
     MipsLabel false_target;
     GenerateTestAndBranch(select,
-                          /* condition_input_index */ 2,
-                          /* true_target */ nullptr,
+                          /* condition_input_index= */ 2,
+                          /* true_target= */ nullptr,
                           &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -6696,7 +6696,7 @@
                                                         obj,
                                                         offset,
                                                         temp_loc,
-                                                        /* needs_null_check */ true);
+                                                        /* needs_null_check= */ true);
         if (is_volatile) {
           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -6929,7 +6929,7 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -6970,7 +6970,7 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -7061,7 +7061,7 @@
           __ AddUpper(base, obj, offset_high);
         }
         MipsLabel skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         if (label_low != nullptr) {
           DCHECK(short_offset);
           __ Bind(label_low);
@@ -7216,11 +7216,11 @@
     MipsLabel skip_call;
     if (short_offset) {
       if (isR6) {
-        __ Beqzc(T9, &skip_call, /* is_bare */ true);
+        __ Beqzc(T9, &skip_call, /* is_bare= */ true);
         __ Nop();  // In forbidden slot.
         __ Jialc(T9, thunk_disp);
       } else {
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
         __ Jalr(T9);
         __ Nop();  // In delay slot.
@@ -7228,13 +7228,13 @@
       __ Bind(&skip_call);
     } else {
       if (isR6) {
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Aui(base, obj, offset_high);  // In delay slot.
         __ Jialc(T9, thunk_disp);
         __ Bind(&skip_call);
       } else {
         __ Lui(base, offset_high);
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
         __ Jalr(T9);
         __ Bind(&skip_call);
@@ -7311,7 +7311,7 @@
     // We will not do the explicit null check in the thunk as some form of a null check
     // must've been done earlier.
     DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
     // Loading the entrypoint does not require a load acquire since it is only changed when
     // threads are suspended or running a checkpoint.
     __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
@@ -7321,13 +7321,13 @@
         : index.AsRegister<Register>();
     MipsLabel skip_call;
     if (GetInstructionSetFeatures().IsR6()) {
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Lsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
     } else {
       __ Sll(TMP, index_reg, scale_factor);
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Addiu(T9, T9, thunk_disp);  // In delay slot.
       __ Jalr(T9);
       __ Bind(&skip_call);
@@ -7442,7 +7442,7 @@
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
                                                   ref,
                                                   obj,
-                                                  /* field_offset */ index,
+                                                  /* field_offset= */ index,
                                                   temp_reg);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
@@ -7705,7 +7705,7 @@
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -7734,7 +7734,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7964,7 +7964,7 @@
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+      ArtMethod* method ATTRIBUTE_UNUSED) {
   return desired_dispatch_info;
 }
 
@@ -8001,7 +8001,7 @@
           NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -8010,7 +8010,7 @@
       PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -8020,7 +8020,7 @@
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -8226,7 +8226,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -8239,7 +8239,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -8253,7 +8253,7 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info_low->label);
       generate_null_check = true;
@@ -8278,12 +8278,12 @@
                                                                              cls->GetClass());
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
-      __ Lui(out, /* placeholder */ 0x1234);
+      __ Lui(out, /* imm16= */ 0x1234);
       __ SetReorder(reordering);
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info->low_label);
       break;
@@ -8432,7 +8432,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
@@ -8445,7 +8445,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -8460,7 +8460,7 @@
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS* slow_path =
@@ -8489,12 +8489,12 @@
                                           load->GetString());
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
-      __ Lui(out, /* placeholder */ 0x1234);
+      __ Lui(out, /* imm16= */ 0x1234);
       __ SetReorder(reordering);
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info->low_label);
       return;
@@ -8702,10 +8702,8 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes care
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index bf95893..5080731 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -563,7 +563,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7c89808..8b6328f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -953,7 +953,7 @@
     : CodeGenerator(graph,
                     kNumberOfGpuRegisters,
                     kNumberOfFpuRegisters,
-                    /* number_of_register_pairs */ 0,
+                    /* number_of_register_pairs= */ 0,
                     ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                         arraysize(kCoreCalleeSaves)),
                     ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -1581,14 +1581,14 @@
     uint32_t intrinsic_data,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
 }
 
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
@@ -1665,7 +1665,7 @@
   DCHECK(!info_high->patch_info_high);
   __ Bind(&info_high->label);
   // Add the high half of a 32-bit offset to PC.
-  __ Auipc(out, /* placeholder */ 0x1234);
+  __ Auipc(out, /* imm16= */ 0x1234);
   // A following instruction will add the sign-extended low half of the 32-bit
   // offset to `out` (e.g. ld, jialc, daddiu).
   if (info_low != nullptr) {
@@ -1679,13 +1679,13 @@
     PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(reg, AT, /* placeholder */ 0x5678);
-  } else if (Runtime::Current()->IsAotCompiler()) {
+    __ Daddiu(reg, AT, /* imm16= */ 0x5678);
+  } else if (GetCompilerOptions().GetCompilePic()) {
     PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
     // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-    __ Lwu(reg, AT, /* placeholder */ 0x5678);
+    __ Lwu(reg, AT, /* imm16= */ 0x5678);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1710,7 +1710,7 @@
     PcRelativePatchInfo* info_low =
         NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+    __ Daddiu(argument, AT, /* imm16= */ 0x5678);
   } else {
     LoadBootImageAddress(argument, boot_image_offset);
   }
@@ -1724,7 +1724,7 @@
   ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
 }
 
 Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
@@ -1733,7 +1733,7 @@
   ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
 }
 
 void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
@@ -2458,7 +2458,7 @@
                                                           obj,
                                                           offset,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
                                                           out_loc,
@@ -2466,7 +2466,7 @@
                                                           data_offset,
                                                           index,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         }
       } else {
         GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -3337,10 +3337,10 @@
   switch (type) {
     default:
       // Integer case.
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
       return;
     case DataType::Type::kInt64:
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
       return;
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64:
@@ -3642,7 +3642,7 @@
 
   if (!DataType::IsIntegralType(type)) {
     LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
-    return;
+    UNREACHABLE();
   }
 
   if (value.IsConstant()) {
@@ -4449,10 +4449,10 @@
 
     switch (type) {
       default:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
         break;
       case DataType::Type::kInt64:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
         break;
       case DataType::Type::kFloat32:
       case DataType::Type::kFloat64:
@@ -4482,7 +4482,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -4501,9 +4501,9 @@
   SlowPathCodeMIPS64* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 // This function returns true if a conditional move can be generated for HSelect.
@@ -4517,7 +4517,7 @@
 // of common logic.
 static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
   bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   HCondition* condition = cond->AsCondition();
 
   DataType::Type cond_type =
@@ -4660,7 +4660,7 @@
   Location dst = locations->Out();
   Location false_src = locations->InAt(0);
   Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   GpuRegister cond_reg = TMP;
   FpuRegister fcond_reg = FTMP;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -4668,7 +4668,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -4677,13 +4677,13 @@
     switch (cond_type) {
       default:
         cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit */ false,
+                                                  /* is64bit= */ false,
                                                   cond_locations,
                                                   cond_reg);
         break;
       case DataType::Type::kInt64:
         cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit */ true,
+                                                  /* is64bit= */ true,
                                                   cond_locations,
                                                   cond_reg);
         break;
@@ -4826,14 +4826,14 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
-  if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) {
+  if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
     GenConditionalMove(select);
   } else {
     LocationSummary* locations = select->GetLocations();
     Mips64Label false_target;
     GenerateTestAndBranch(select,
-                          /* condition_input_index */ 2,
-                          /* true_target */ nullptr,
+                          /* condition_input_index= */ 2,
+                          /* true_target= */ nullptr,
                           &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -4945,7 +4945,7 @@
                                                         obj,
                                                         offset,
                                                         temp_loc,
-                                                        /* needs_null_check */ true);
+                                                        /* needs_null_check= */ true);
         if (is_volatile) {
           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5101,7 +5101,7 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -5142,7 +5142,7 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -5230,7 +5230,7 @@
           __ Daui(base, obj, offset_high);
         }
         Mips64Label skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         if (label_low != nullptr) {
           DCHECK(short_offset);
           __ Bind(label_low);
@@ -5360,7 +5360,7 @@
     GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
     Mips64Label skip_call;
     if (short_offset) {
-      __ Beqzc(T9, &skip_call, /* is_bare */ true);
+      __ Beqzc(T9, &skip_call, /* is_bare= */ true);
       __ Nop();  // In forbidden slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
@@ -5369,7 +5369,7 @@
     } else {
       int16_t offset_low = Low16Bits(offset);
       int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lwu.
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Daui(TMP, obj, offset_high);  // In delay slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
@@ -5442,12 +5442,12 @@
     // We will not do the explicit null check in the thunk as some form of a null check
     // must've been done earlier.
     DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
     // Loading the entrypoint does not require a load acquire since it is only changed when
     // threads are suspended or running a checkpoint.
     __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
     Mips64Label skip_call;
-    __ Beqz(T9, &skip_call, /* is_bare */ true);
+    __ Beqz(T9, &skip_call, /* is_bare= */ true);
     GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
     GpuRegister index_reg = index.AsRegister<GpuRegister>();
     __ Dlsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
@@ -5558,7 +5558,7 @@
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
                                                     ref,
                                                     obj,
-                                                    /* field_offset */ index,
+                                                    /* field_offset= */ index,
                                                     temp_reg);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
@@ -5821,7 +5821,7 @@
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -5850,7 +5850,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bc(slow_path->GetEntryLabel());
       break;
@@ -6059,7 +6059,7 @@
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+      ArtMethod* method ATTRIBUTE_UNUSED) {
   // On MIPS64 we support all dispatch types.
   return desired_dispatch_info;
 }
@@ -6092,7 +6092,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -6101,7 +6101,7 @@
       PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
       // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -6110,7 +6110,7 @@
       PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6280,7 +6280,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* placeholder */ 0x5678);
+      __ Daddiu(out, AT, /* imm16= */ 0x5678);
       break;
     }
     case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -6291,7 +6291,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* placeholder */ 0x5678);
+      __ Lwu(out, AT, /* imm16= */ 0x5678);
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -6303,7 +6303,7 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info_low->label);
       generate_null_check = true;
@@ -6427,7 +6427,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* placeholder */ 0x5678);
+      __ Daddiu(out, AT, /* imm16= */ 0x5678);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
@@ -6438,7 +6438,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* placeholder */ 0x5678);
+      __ Lwu(out, AT, /* imm16= */ 0x5678);
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -6451,7 +6451,7 @@
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS64* slow_path =
@@ -6633,10 +6633,8 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes care
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ddc154d..52f3a62 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -541,7 +541,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 43169ba..5a18c1f 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -216,7 +216,7 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ Addv(dst.S(), src.V4S());
           break;
@@ -230,7 +230,7 @@
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ Addp(dst.D(), src.V2D());
           break;
@@ -1277,6 +1277,74 @@
   }
 }
 
+void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  DCHECK(instruction->GetPackedType() == DataType::Type::kInt32);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetInAt(2, Location::RequiresFpuRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+
+  // For Int8 and Uint8 we need a temp register.
+  if (DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) {
+    locations->AddTemp(Location::RequiresFpuRegister());
+  }
+}
+
+void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  VRegister acc = VRegisterFrom(locations->InAt(0));
+  VRegister left = VRegisterFrom(locations->InAt(1));
+  VRegister right = VRegisterFrom(locations->InAt(2));
+  HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
+  HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
+  DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+            HVecOperation::ToSignedType(b->GetPackedType()));
+  DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32);
+  DCHECK_EQ(4u, instruction->GetVectorLength());
+
+  size_t inputs_data_size = DataType::Size(a->GetPackedType());
+  switch (inputs_data_size) {
+    case 1u: {
+      DCHECK_EQ(16u, a->GetVectorLength());
+      VRegister tmp = VRegisterFrom(locations->GetTemp(0));
+      if (instruction->IsZeroExtending()) {
+        // TODO: Use Armv8.4-A UDOT instruction when it is available.
+        __ Umull(tmp.V8H(), left.V8B(), right.V8B());
+        __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+        __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+        __ Umull2(tmp.V8H(), left.V16B(), right.V16B());
+        __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+        __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+      } else {
+        // TODO: Use Armv8.4-A SDOT instruction when it is available.
+        __ Smull(tmp.V8H(), left.V8B(), right.V8B());
+        __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+        __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+        __ Smull2(tmp.V8H(), left.V16B(), right.V16B());
+        __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+        __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+      }
+      break;
+    }
+    case 2u:
+      DCHECK_EQ(8u, a->GetVectorLength());
+      if (instruction->IsZeroExtending()) {
+        __ Umlal(acc.V4S(), left.V4H(), right.V4H());
+        __ Umlal2(acc.V4S(), left.V8H(), right.V8H());
+      } else {
+        __ Smlal(acc.V4S(), left.V4H(), right.V4H());
+        __ Smlal2(acc.V4S(), left.V8H(), right.V8H());
+      }
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size;
+  }
+}
+
 // Helper to set up locations for vector memory operations.
 static void CreateVecMemLocations(ArenaAllocator* allocator,
                                   HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 7b66b17..b092961 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -138,7 +138,7 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ Vpadd(DataTypeValue::I32, dst, src, src);
           break;
@@ -854,6 +854,14 @@
   }
 }
 
+void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
 // Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word
 // size equals to 4).
 static bool IsWordAligned(HVecMemoryOperation* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index df0e148..4e9ba0d 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -74,19 +74,19 @@
       __ InsertW(static_cast<VectorRegister>(FTMP),
                  locations->InAt(0).AsRegisterPairHigh<Register>(),
                  1);
-      __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true);
+      __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double */ false);
+                                     /* is_double= */ false);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double */ true);
+                                     /* is_double= */ true);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -187,7 +187,7 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ Hadd_sD(tmp, src, src);
           __ IlvlD(dst, tmp, tmp);
@@ -209,7 +209,7 @@
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ IlvlD(dst, src, src);
           __ AddvD(dst, dst, src);
@@ -1274,6 +1274,14 @@
   }
 }
 
+void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
 // Helper to set up locations for vector memory operations.
 static void CreateVecMemLocations(ArenaAllocator* allocator,
                                   HVecMemoryOperation* instruction,
@@ -1336,7 +1344,7 @@
 }
 
 void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
 }
 
 void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1379,7 +1387,7 @@
 }
 
 void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
 }
 
 void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index de354b6..6467d3e 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -79,13 +79,13 @@
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double */ false);
+                                     /* is_double= */ false);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double */ true);
+                                     /* is_double= */ true);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -185,7 +185,7 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ Hadd_sD(tmp, src, src);
           __ IlvlD(dst, tmp, tmp);
@@ -207,7 +207,7 @@
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ IlvlD(dst, src, src);
           __ AddvD(dst, dst, src);
@@ -1272,6 +1272,14 @@
   }
 }
 
+void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
 // Helper to set up locations for vector memory operations.
 static void CreateVecMemLocations(ArenaAllocator* allocator,
                                   HVecMemoryOperation* instruction,
@@ -1334,7 +1342,7 @@
 }
 
 void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
 }
 
 void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1377,7 +1385,7 @@
 }
 
 void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
 }
 
 void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 2502275..0ee0035 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -205,8 +205,8 @@
   CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
   // Long reduction or min/max require a temporary.
   if (instruction->GetPackedType() == DataType::Type::kInt64 ||
-      instruction->GetKind() == HVecReduce::kMin ||
-      instruction->GetKind() == HVecReduce::kMax) {
+      instruction->GetReductionKind() == HVecReduce::kMin ||
+      instruction->GetReductionKind() == HVecReduce::kMax) {
     instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
   }
 }
@@ -218,38 +218,23 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ movaps(dst, src);
           __ phaddd(dst, dst);
           __ phaddd(dst, dst);
           break;
-        case HVecReduce::kMin: {
-          XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-          __ movaps(tmp, src);
-          __ movaps(dst, src);
-          __ psrldq(tmp, Immediate(8));
-          __ pminsd(dst, tmp);
-          __ psrldq(tmp, Immediate(4));
-          __ pminsd(dst, tmp);
-          break;
-        }
-        case HVecReduce::kMax: {
-          XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-          __ movaps(tmp, src);
-          __ movaps(dst, src);
-          __ psrldq(tmp, Immediate(8));
-          __ pmaxsd(dst, tmp);
-          __ psrldq(tmp, Immediate(4));
-          __ pmaxsd(dst, tmp);
-          break;
-        }
+        case HVecReduce::kMin:
+        case HVecReduce::kMax:
+          // Historical note: We've had a broken implementation here. b/117863065
+          // Do not draw on the old code if we ever want to bring MIN/MAX reduction back.
+          LOG(FATAL) << "Unsupported reduction type.";
       }
       break;
     case DataType::Type::kInt64: {
       DCHECK_EQ(2u, instruction->GetVectorLength());
       XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ movaps(tmp, src);
           __ movaps(dst, src);
@@ -1143,6 +1128,14 @@
   LOG(FATAL) << "No SIMD for " << instruction->GetId();
 }
 
+void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
 // Helper to set up locations for vector memory operations.
 static void CreateVecMemLocations(ArenaAllocator* allocator,
                                   HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4a67daf..9c28827 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -188,8 +188,8 @@
   CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
   // Long reduction or min/max require a temporary.
   if (instruction->GetPackedType() == DataType::Type::kInt64 ||
-      instruction->GetKind() == HVecReduce::kMin ||
-      instruction->GetKind() == HVecReduce::kMax) {
+      instruction->GetReductionKind() == HVecReduce::kMin ||
+      instruction->GetReductionKind() == HVecReduce::kMax) {
     instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
   }
 }
@@ -201,38 +201,23 @@
   switch (instruction->GetPackedType()) {
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ movaps(dst, src);
           __ phaddd(dst, dst);
           __ phaddd(dst, dst);
           break;
-        case HVecReduce::kMin: {
-          XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-          __ movaps(tmp, src);
-          __ movaps(dst, src);
-          __ psrldq(tmp, Immediate(8));
-          __ pminsd(dst, tmp);
-          __ psrldq(tmp, Immediate(4));
-          __ pminsd(dst, tmp);
-          break;
-        }
-        case HVecReduce::kMax: {
-          XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-          __ movaps(tmp, src);
-          __ movaps(dst, src);
-          __ psrldq(tmp, Immediate(8));
-          __ pmaxsd(dst, tmp);
-          __ psrldq(tmp, Immediate(4));
-          __ pmaxsd(dst, tmp);
-          break;
-        }
+        case HVecReduce::kMin:
+        case HVecReduce::kMax:
+          // Historical note: We've had a broken implementation here. b/117863065
+          // Do not draw on the old code if we ever want to bring MIN/MAX reduction back.
+          LOG(FATAL) << "Unsupported reduction type.";
       }
       break;
     case DataType::Type::kInt64: {
       DCHECK_EQ(2u, instruction->GetVectorLength());
       XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
-      switch (instruction->GetKind()) {
+      switch (instruction->GetReductionKind()) {
         case HVecReduce::kSum:
           __ movaps(tmp, src);
           __ movaps(dst, src);
@@ -1116,6 +1101,14 @@
   LOG(FATAL) << "No SIMD for " << instruction->GetId();
 }
 
+void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
 // Helper to set up locations for vector memory operations.
 static void CreateVecMemLocations(ArenaAllocator* allocator,
                                   HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6a27081..766ff78 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1228,7 +1228,7 @@
     case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
-      break;
+      UNREACHABLE();
   }
   return Location::NoLocation();
 }
@@ -1720,7 +1720,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1738,9 +1738,9 @@
 void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
   SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
   GenerateTestAndBranch<Label>(deoptimize,
-                               /* condition_input_index */ 0,
+                               /* condition_input_index= */ 0,
                                slow_path->GetEntryLabel(),
-                               /* false_target */ nullptr);
+                               /* false_target= */ nullptr);
 }
 
 void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1863,7 +1863,7 @@
   } else {
     NearLabel false_target;
     GenerateTestAndBranch<NearLabel>(
-        select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target);
+        select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
   }
@@ -2989,7 +2989,7 @@
 
     default:
       LOG(FATAL) << "Unexpected add type " << add->GetResultType();
-      break;
+      UNREACHABLE();
   }
 }
 
@@ -3434,8 +3434,8 @@
 
   // Load the values to the FP stack in reverse order, using temporaries if needed.
   const bool is_wide = !is_float;
-  PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
-  PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
+  PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide);
+  PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide);
 
   // Loop doing FPREM until we stabilize.
   NearLabel retry;
@@ -3497,6 +3497,27 @@
   }
 }
 
+void InstructionCodeGeneratorX86::RemByPowerOfTwo(HRem* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location second = locations->InAt(1);
+
+  Register out = locations->Out().AsRegister<Register>();
+  Register numerator = locations->InAt(0).AsRegister<Register>();
+
+  int32_t imm = Int64FromConstant(second.GetConstant());
+  DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+  uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+
+  Register tmp = locations->GetTemp(0).AsRegister<Register>();
+  NearLabel done;
+  __ movl(out, numerator);
+  __ andl(out, Immediate(abs_imm-1));
+  __ j(Condition::kZero, &done);
+  __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm-1))));
+  __ testl(numerator, numerator);
+  __ cmovl(Condition::kLess, out, tmp);
+  __ Bind(&done);
+}
 
 void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
   LocationSummary* locations = instruction->GetLocations();
@@ -3551,7 +3572,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
   // Save the numerator.
   __ movl(num, eax);
@@ -3610,8 +3631,12 @@
           // Do not generate anything for 0. DivZeroCheck would forbid any generated code.
         } else if (imm == 1 || imm == -1) {
           DivRemOneOrMinusOne(instruction);
-        } else if (is_div && IsPowerOfTwo(AbsOrMin(imm))) {
-          DivByPowerOfTwo(instruction->AsDiv());
+        } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+          if (is_div) {
+            DivByPowerOfTwo(instruction->AsDiv());
+          } else {
+            RemByPowerOfTwo(instruction->AsRem());
+          }
         } else {
           DCHECK(imm <= -2 || imm >= 2);
           GenerateDivRemWithAnyConstant(instruction);
@@ -4525,10 +4550,8 @@
 }
 
 void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
@@ -4778,14 +4801,14 @@
     }
     case MemBarrierKind::kNTStoreStore:
       // Non-Temporal Store/Store needs an explicit fence.
-      MemoryFence(/* non-temporal */ true);
+      MemoryFence(/* non-temporal= */ true);
       break;
   }
 }
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+      ArtMethod* method ATTRIBUTE_UNUSED) {
   return desired_dispatch_info;
 }
 
@@ -4913,14 +4936,14 @@
 void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                                      uint32_t intrinsic_data) {
   boot_image_intrinsic_patches_.emplace_back(
-      method_address, /* target_dex_file */ nullptr, intrinsic_data);
+      method_address, /* target_dex_file= */ nullptr, intrinsic_data);
   __ Bind(&boot_image_intrinsic_patches_.back().label);
 }
 
 void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
                                                  uint32_t boot_image_offset) {
   boot_image_method_patches_.emplace_back(
-      method_address, /* target_dex_file */ nullptr, boot_image_offset);
+      method_address, /* target_dex_file= */ nullptr, boot_image_offset);
   __ Bind(&boot_image_method_patches_.back().label);
 }
 
@@ -4988,7 +5011,7 @@
         invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
     __ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
     RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
-  } else if (Runtime::Current()->IsAotCompiler()) {
+  } else if (GetCompilerOptions().GetCompilePic()) {
     DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
     HX86ComputeBaseMethodAddress* method_address =
         invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
@@ -5214,7 +5237,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, /* needs_null_check */ true);
+            instruction, out, base, offset, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5697,7 +5720,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
-            instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+            instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
       } else {
         Register out = out_loc.AsRegister<Register>();
         __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -6559,7 +6582,7 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /* fixup_label */ nullptr,
+          /* fixup_label= */ nullptr,
           read_barrier_option);
       break;
     }
@@ -7086,7 +7109,7 @@
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -7118,7 +7141,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -7426,6 +7449,61 @@
   }
 }
 
+void LocationsBuilderX86::VisitX86AndNot(HX86AndNot* instruction) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+  DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86AndNot(HX86AndNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+  Location dest = locations->Out();
+  if (instruction->GetResultType() == DataType::Type::kInt32) {
+    __ andn(dest.AsRegister<Register>(),
+            first.AsRegister<Register>(),
+            second.AsRegister<Register>());
+  } else {
+    DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+    __ andn(dest.AsRegisterPairLow<Register>(),
+            first.AsRegisterPairLow<Register>(),
+            second.AsRegisterPairLow<Register>());
+    __ andn(dest.AsRegisterPairHigh<Register>(),
+            first.AsRegisterPairHigh<Register>(),
+            second.AsRegisterPairHigh<Register>());
+  }
+}
+
+void LocationsBuilderX86::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+  DCHECK(instruction->GetType() == DataType::Type::kInt32) << instruction->GetType();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86MaskOrResetLeastSetBit(
+    HX86MaskOrResetLeastSetBit* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location src = locations->InAt(0);
+  Location dest = locations->Out();
+  DCHECK(instruction->GetResultType() == DataType::Type::kInt32);
+  switch (instruction->GetOpKind()) {
+    case HInstruction::kAnd:
+      __ blsr(dest.AsRegister<Register>(), src.AsRegister<Register>());
+      break;
+    case HInstruction::kXor:
+      __ blsmsk(dest.AsRegister<Register>(), src.AsRegister<Register>());
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+}
+
 void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
 void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
 void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -7572,7 +7650,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -7606,7 +7684,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -7655,7 +7733,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
-          instruction, root, /* unpoison_ref_before_marking */ false);
+          instruction, root, /* unpoison_ref_before_marking= */ false);
       codegen_->AddSlowPath(slow_path);
 
       // Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
@@ -7785,10 +7863,10 @@
   if (always_update_field) {
     DCHECK(temp != nullptr);
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
-        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+        instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
-        instruction, ref, /* unpoison_ref_before_marking */ true);
+        instruction, ref, /* unpoison_ref_before_marking= */ true);
   }
   AddSlowPath(slow_path);
 
@@ -8301,7 +8379,7 @@
   uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
   uintptr_t address =
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+  using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
      dchecked_integral_cast<uint32_t>(address);
 }
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6154771..deeef88 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -159,6 +159,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -190,6 +191,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -216,6 +218,7 @@
   void GenerateDivRemIntegral(HBinaryOperation* instruction);
   void DivRemOneOrMinusOne(HBinaryOperation* instruction);
   void DivByPowerOfTwo(HDiv* instruction);
+  void RemByPowerOfTwo(HRem* instruction);
   void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
   void GenerateRemFP(HRem* rem);
   void HandleCondition(HCondition* condition);
@@ -410,7 +413,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   // Generate a call to a static or direct method.
   void GenerateStaticOrDirectCall(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 489652b..67a2aa5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -978,7 +978,7 @@
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+      ArtMethod* method ATTRIBUTE_UNUSED) {
   return desired_dispatch_info;
 }
 
@@ -992,7 +992,7 @@
       // temp = thread->string_init_entrypoint
       uint32_t offset =
           GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
-      __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
+      __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1001,19 +1001,19 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
       DCHECK(GetCompilerOptions().IsBootImage());
       __ leal(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordBootImageMethodPatch(invoke);
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
       // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
       __ movl(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       __ movq(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordMethodBssEntryPatch(invoke);
       break;
     }
@@ -1076,12 +1076,12 @@
 }
 
 void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
-  boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+  boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
   __ Bind(&boot_image_intrinsic_patches_.back().label);
 }
 
 void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
-  boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+  boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
   __ Bind(&boot_image_method_patches_.back().label);
 }
 
@@ -1123,10 +1123,10 @@
 
 void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
   if (GetCompilerOptions().IsBootImage()) {
-    __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+    __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     RecordBootImageIntrinsicPatch(boot_image_reference);
-  } else if (Runtime::Current()->IsAotCompiler()) {
-    __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+  } else if (GetCompilerOptions().GetCompilePic()) {
+    __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     RecordBootImageRelRoPatch(boot_image_reference);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
@@ -1146,7 +1146,7 @@
     DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
     // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
     __ leal(argument,
-            Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+            Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     MethodReference target_method = invoke->GetTargetMethod();
     dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
     boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
@@ -1277,7 +1277,7 @@
 }
 
 void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
-  __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+  __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true));
 }
 
 static constexpr int kNumberOfCpuRegisterPairs = 0;
@@ -1799,7 +1799,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1817,9 +1817,9 @@
 void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
   SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
   GenerateTestAndBranch<Label>(deoptimize,
-                               /* condition_input_index */ 0,
+                               /* condition_input_index= */ 0,
                                slow_path->GetEntryLabel(),
-                               /* false_target */ nullptr);
+                               /* false_target= */ nullptr);
 }
 
 void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1922,8 +1922,8 @@
   } else {
     NearLabel false_target;
     GenerateTestAndBranch<NearLabel>(select,
-                                     /* condition_input_index */ 2,
-                                     /* true_target */ nullptr,
+                                     /* condition_input_index= */ 2,
+                                     /* true_target= */ nullptr,
                                      &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -2425,7 +2425,7 @@
     case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
-      break;
+      UNREACHABLE();
   }
   return Location::NoLocation();
 }
@@ -3560,7 +3560,40 @@
       LOG(FATAL) << "Unexpected type for div by (-)1 " << instruction->GetResultType();
   }
 }
+void InstructionCodeGeneratorX86_64::RemByPowerOfTwo(HRem* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location second = locations->InAt(1);
+  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+  CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
+  int64_t imm = Int64FromConstant(second.GetConstant());
+  DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+  uint64_t abs_imm = AbsOrMin(imm);
+  CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+  if (instruction->GetResultType() == DataType::Type::kInt32) {
+    NearLabel done;
+    __ movl(out, numerator);
+    __ andl(out, Immediate(abs_imm-1));
+    __ j(Condition::kZero, &done);
+    __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm-1))));
+    __ testl(numerator, numerator);
+    __ cmov(Condition::kLess, out, tmp, false);
+    __ Bind(&done);
 
+  } else {
+    DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+    codegen_->Load64BitValue(tmp, abs_imm - 1);
+    NearLabel done;
+
+    __ movq(out, numerator);
+    __ andq(out, tmp);
+    __ j(Condition::kZero, &done);
+    __ movq(tmp, numerator);
+    __ sarq(tmp, Immediate(63));
+    __ shlq(tmp, Immediate(WhichPowerOf2(abs_imm)));
+    __ orq(out, tmp);
+    __ Bind(&done);
+  }
+}
 void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Location second = locations->InAt(1);
@@ -3575,9 +3608,17 @@
   CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
 
   if (instruction->GetResultType() == DataType::Type::kInt32) {
-    __ leal(tmp, Address(numerator, abs_imm - 1));
-    __ testl(numerator, numerator);
-    __ cmov(kGreaterEqual, tmp, numerator);
+    // When denominator is equal to 2, we can add signed bit and numerator to tmp.
+    // Below we are using addl instruction instead of cmov which give us 1 cycle benefit.
+    if (abs_imm == 2) {
+      __ leal(tmp, Address(numerator, 0));
+      __ shrl(tmp, Immediate(31));
+      __ addl(tmp, numerator);
+    } else {
+      __ leal(tmp, Address(numerator, abs_imm - 1));
+      __ testl(numerator, numerator);
+      __ cmov(kGreaterEqual, tmp, numerator);
+    }
     int shift = CTZ(imm);
     __ sarl(tmp, Immediate(shift));
 
@@ -3589,11 +3630,16 @@
   } else {
     DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
     CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>();
-
-    codegen_->Load64BitValue(rdx, abs_imm - 1);
-    __ addq(rdx, numerator);
-    __ testq(numerator, numerator);
-    __ cmov(kGreaterEqual, rdx, numerator);
+    if (abs_imm == 2) {
+      __ movq(rdx, numerator);
+      __ shrq(rdx, Immediate(63));
+      __ addq(rdx, numerator);
+    } else {
+      codegen_->Load64BitValue(rdx, abs_imm - 1);
+      __ addq(rdx, numerator);
+      __ testq(numerator, numerator);
+      __ cmov(kGreaterEqual, rdx, numerator);
+    }
     int shift = CTZ(imm);
     __ sarq(rdx, Immediate(shift));
 
@@ -3633,7 +3679,7 @@
   if (instruction->GetResultType() == DataType::Type::kInt32) {
     int imm = second.GetConstant()->AsIntConstant()->GetValue();
 
-    CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+    CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift);
 
     __ movl(numerator, eax);
 
@@ -3670,7 +3716,7 @@
     CpuRegister rax = eax;
     CpuRegister rdx = edx;
 
-    CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift);
+    CalculateMagicAndShiftForDivRem(imm, true /* is_long= */, &magic, &shift);
 
     // Save the numerator.
     __ movq(numerator, rax);
@@ -3737,8 +3783,12 @@
       // Do not generate anything. DivZeroCheck would prevent any code to be executed.
     } else if (imm == 1 || imm == -1) {
       DivRemOneOrMinusOne(instruction);
-    } else if (instruction->IsDiv() && IsPowerOfTwo(AbsOrMin(imm))) {
-      DivByPowerOfTwo(instruction->AsDiv());
+    } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+      if (is_div) {
+        DivByPowerOfTwo(instruction->AsDiv());
+      } else {
+        RemByPowerOfTwo(instruction->AsRem());
+      }
     } else {
       DCHECK(imm <= -2 || imm >= 2);
       GenerateDivRemWithAnyConstant(instruction);
@@ -4371,10 +4421,8 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  QuickEntrypointEnum entrypoint =
-      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
@@ -4506,7 +4554,7 @@
     }
     case MemBarrierKind::kNTStoreStore:
       // Non-Temporal Store/Store needs an explicit fence.
-      MemoryFence(/* non-temporal */ true);
+      MemoryFence(/* non-temporal= */ true);
       break;
   }
 }
@@ -4583,7 +4631,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, /* needs_null_check */ true);
+            instruction, out, base, offset, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5038,7 +5086,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
-            instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+            instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
       } else {
         CpuRegister out = out_loc.AsRegister<CpuRegister>();
         __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5438,7 +5486,7 @@
   }
   // Load the address of the card table into `card`.
   __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
-                                        /* no_rip */ true));
+                                        /* no_rip= */ true));
   // Calculate the offset (in the card table) of the card corresponding to
   // `object`.
   __ movq(temp, object);
@@ -5518,7 +5566,7 @@
   }
 
   __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
-                                  /* no_rip */ true),
+                                  /* no_rip= */ true),
                 Immediate(0));
   if (successor == nullptr) {
     __ j(kNotEqual, slow_path->GetEntryLabel());
@@ -5900,25 +5948,25 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /* fixup_label */ nullptr,
+          /* fixup_label= */ nullptr,
           read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageTypePatch(cls);
       break;
     case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ false);
+                                          /* no_rip= */ false);
       Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
@@ -5934,7 +5982,7 @@
     }
     case HLoadClass::LoadKind::kJitTableAddress: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
+                                          /* no_rip= */ true);
       Label* fixup_label =
           codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
       // /* GcRoot<mirror::Class> */ out = *address
@@ -6059,19 +6107,19 @@
   switch (load->GetLoadKind()) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageStringPatch(load);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ false);
+                                          /* no_rip= */ false);
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
@@ -6090,7 +6138,7 @@
     }
     case HLoadString::LoadKind::kJitTableAddress: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
+                                          /* no_rip= */ true);
       Label* fixup_label = codegen_->NewJitRootStringPatch(
           load->GetDexFile(), load->GetStringIndex(), load->GetString());
       // /* GcRoot<mirror::String> */ out = *address
@@ -6112,7 +6160,7 @@
 
 static Address GetExceptionTlsAddress() {
   return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
-                           /* no_rip */ true);
+                           /* no_rip= */ true);
 }
 
 void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -6387,7 +6435,7 @@
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -6419,7 +6467,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -6736,6 +6784,48 @@
   }
 }
 
+void LocationsBuilderX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+  DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  // There is no immediate variant of the negated bitwise AND (andn) instruction on x86.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void LocationsBuilderX86_64::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+  DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+  Location dest = locations->Out();
+  __ andn(dest.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location src = locations->InAt(0);
+  Location dest = locations->Out();
+  switch (instruction->GetOpKind()) {
+    case HInstruction::kAnd:
+      __ blsr(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+      break;
+    case HInstruction::kXor:
+      __ blsmsk(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+}
+
 void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
 void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
 void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -6864,7 +6954,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -6898,7 +6988,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -6947,13 +7037,13 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
-          instruction, root, /* unpoison_ref_before_marking */ false);
+          instruction, root, /* unpoison_ref_before_marking= */ false);
       codegen_->AddSlowPath(slow_path);
 
       // Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
       const int32_t entry_point_offset =
           Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
-      __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+      __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0));
       // The entrypoint is null when the GC is not marking.
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -7079,10 +7169,10 @@
     DCHECK(temp1 != nullptr);
     DCHECK(temp2 != nullptr);
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
-        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+        instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
-        instruction, ref, /* unpoison_ref_before_marking */ true);
+        instruction, ref, /* unpoison_ref_before_marking= */ true);
   }
   AddSlowPath(slow_path);
 
@@ -7542,7 +7632,7 @@
   uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
   uintptr_t address =
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+  using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
      dchecked_integral_cast<uint32_t>(address);
 }
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f77a5c8..f74e130 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -161,6 +161,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -192,6 +193,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -213,6 +215,7 @@
   void GenerateRemFP(HRem* rem);
   void DivRemOneOrMinusOne(HBinaryOperation* instruction);
   void DivByPowerOfTwo(HDiv* instruction);
+  void RemByPowerOfTwo(HRem* instruction);
   void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
   void GenerateDivRemIntegral(HBinaryOperation* instruction);
   void HandleCondition(HCondition* condition);
@@ -409,7 +412,7 @@
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) override;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d6c9755..f406983 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -180,7 +180,7 @@
   DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.
 
   // Find the target block.
-  CommonDominator finder(/* start_block */ nullptr);
+  CommonDominator finder(/* block= */ nullptr);
   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
     HInstruction* user = use.GetUser();
     if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
@@ -259,12 +259,12 @@
 
   size_t number_of_instructions = graph_->GetCurrentInstructionId();
   ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
-  ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+  ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false);
   processed_instructions.ClearAllBits();
-  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
   post_dominated.ClearAllBits();
   ArenaBitVector instructions_that_can_move(
-      &allocator, number_of_instructions, /* expandable */ false);
+      &allocator, number_of_instructions, /* expandable= */ false);
   instructions_that_can_move.ClearAllBits();
   ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
 
@@ -414,7 +414,7 @@
       }
       // Find the position of the instruction we're storing into, filtering out this
       // store and all other stores to that instruction.
-      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);
 
       // The position needs to be dominated by the store, in order for the store to move there.
       if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
@@ -434,7 +434,7 @@
       continue;
     }
     MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
-    instruction->MoveBefore(position, /* ensure_safety */ false);
+    instruction->MoveBefore(position, /* do_checks= */ false);
   }
 }
 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f186191..b5a7c13 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -823,6 +823,33 @@
   InternalCodeAllocator code_allocator;
   codegen.Finalize(&code_allocator);
 }
+
+// Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a75 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+  HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kDotProduct));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
+// Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a53 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+  HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kDotProduct));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_mips
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b1436f8..74d9d3a 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -70,7 +70,7 @@
 
     check_after_cf(graph_);
 
-    HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
+    HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run();
     GraphChecker graph_checker_dce(graph_);
     graph_checker_dce.Run();
     ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 3cb8bf2..3a1a9e0 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -78,7 +78,7 @@
     VisitSetLocation(instruction, value);
   }
 
-  void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) {
+  void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) override {
     // Pessimize: Merge all fences.
     MergeCandidateFences();
   }
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 5ac6e46..3cbcc9e 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -231,6 +231,21 @@
     }
   }
 
+  static Type ToUnsigned(Type type) {
+    switch (type) {
+      case Type::kInt8:
+        return Type::kUint8;
+      case Type::kInt16:
+        return Type::kUint16;
+      case Type::kInt32:
+        return Type::kUint32;
+      case Type::kInt64:
+        return Type::kUint64;
+      default:
+        return type;
+    }
+  }
+
   static const char* PrettyDescriptor(Type type);
 
  private:
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 2774535..f5cd4dc 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -43,7 +43,7 @@
   std::string actual_before = printer_before.str();
   ASSERT_EQ(actual_before, expected_before);
 
-  HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
+  HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run();
   GraphChecker graph_checker(graph);
   graph_checker.Run();
   ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a689f35..01d9603 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -635,8 +635,8 @@
       }
     }
     CheckTypeCheckBitstringInput(
-        check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root");
-    CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask");
+        check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root");
+    CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask");
   } else {
     if (!input->IsLoadClass()) {
       AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.",
@@ -931,7 +931,7 @@
           // because the BitVector reallocation strategy has very bad worst-case behavior.
           ArenaBitVector visited(&allocator,
                                  GetGraph()->GetCurrentInstructionId(),
-                                 /* expandable */ false,
+                                 /* expandable= */ false,
                                  kArenaAllocGraphChecker);
           visited.ClearAllBits();
           if (!IsConstantEquivalent(phi, other_phi, &visited)) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 31db8c2..2a7bbcb 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -106,8 +106,7 @@
   }
 }
 
-typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
-                                              DisassemblerOptions* options);
+using create_disasm_prototype = Disassembler*(InstructionSet, DisassemblerOptions*);
 class HGraphVisualizerDisassembler {
  public:
   HGraphVisualizerDisassembler(InstructionSet instruction_set,
@@ -131,10 +130,10 @@
     // been generated, so we can read data in literal pools.
     disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
             instruction_set,
-            new DisassemblerOptions(/* absolute_addresses */ false,
+            new DisassemblerOptions(/* absolute_addresses= */ false,
                                     base_address,
                                     end_address,
-                                    /* can_read_literals */ true,
+                                    /* can_read_literals= */ true,
                                     Is64BitInstructionSet(instruction_set)
                                         ? &Thread::DumpThreadOffset<PointerSize::k64>
                                         : &Thread::DumpThreadOffset<PointerSize::k32>)));
@@ -394,7 +393,7 @@
   void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     const DexFile& dex_file = load_method_type->GetDexFile();
-    const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
+    const dex::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
     StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
   }
 
@@ -564,6 +563,14 @@
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
+  void VisitVecDotProd(HVecDotProd* instruction) override {
+    VisitVecOperation(instruction);
+    DataType::Type arg_type = instruction->InputAt(1)->AsVecOperation()->GetPackedType();
+    StartAttributeStream("type") << (instruction->IsZeroExtending() ?
+                                    DataType::ToUnsigned(arg_type) :
+                                    DataType::ToSigned(arg_type));
+  }
+
 #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
   void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
     StartAttributeStream("kind") << instruction->GetOpKind();
@@ -917,8 +924,8 @@
     HGraphVisualizerPrinter printer(graph_,
                                     *output_,
                                     "disassembly",
-                                    /* is_after_pass */ true,
-                                    /* graph_in_bad_state */ false,
+                                    /* is_after_pass= */ true,
+                                    /* graph_in_bad_state= */ false,
                                     codegen_,
                                     codegen_.GetDisassemblyInformation());
     printer.Run();
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index e6b6326..3689d1d 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -348,7 +348,7 @@
         side_effects_(side_effects),
         sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
         visited_blocks_(
-            &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+            &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) {
     visited_blocks_.ClearAllBits();
   }
 
@@ -546,12 +546,12 @@
     // that is larger, we return it if no perfectly-matching set is found.
     // Note that we defer testing WillBeReferencedAgain until all other criteria
     // have been satisfied because it might be expensive.
-    if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) {
+    if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
       if (!WillBeReferencedAgain(current_block)) {
         return current_block;
       }
     } else if (secondary_match == nullptr &&
-               current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) {
+               current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
       if (!WillBeReferencedAgain(current_block)) {
         secondary_match = current_block;
       }
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index a4d638f..3a10d58 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -1074,8 +1074,8 @@
           && lower_value >= upper_value;
     default:
       LOG(FATAL) << "CONDITION UNREACHABLE";
+      UNREACHABLE();
   }
-  return false;  // not certain, may be untaken
 }
 
 bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
@@ -1099,8 +1099,8 @@
       return (IsAtLeast(upper_expr, &value) && value >= (min - stride_value));
     default:
       LOG(FATAL) << "CONDITION UNREACHABLE";
+      UNREACHABLE();
   }
-  return false;  // not certain, may be infinite
 }
 
 bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 55eca23..4c78fa8 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,13 +216,13 @@
   chase_hint_ = chase_hint;
   bool in_body = context->GetBlock() != loop->GetHeader();
   int64_t stride_value = 0;
-  *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
-  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
+  *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
+  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint);
   *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
   chase_hint_ = nullptr;
   // Retry chasing constants for wrap-around (merge sensitive).
   if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
-    *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+    *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
   }
   return true;
 }
@@ -445,8 +445,8 @@
     }
     // Try range analysis on the invariant, only accept a proper range
     // to avoid arithmetic wrap-around anomalies.
-    Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
-    Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+    Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true);
+    Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false);
     if (IsConstantValue(min_val) &&
         IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
       if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
@@ -791,10 +791,10 @@
     return MulRangeAndConstant(value, info1, trip, in_body, is_min);
   }
   // Interval ranges.
-  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
   // Positive range vs. positive or negative range.
   if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
     if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -825,10 +825,10 @@
     return DivRangeAndConstant(value, info1, trip, in_body, is_min);
   }
   // Interval ranges.
-  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
   // Positive range vs. positive or negative range.
   if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
     if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -1019,10 +1019,10 @@
   // Code generation for taken test: generate the code when requested or otherwise analyze
   // if code generation is feasible when taken test is needed.
   if (taken_test != nullptr) {
-    return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+    return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false);
   } else if (*needs_taken_test) {
     if (!GenerateCode(
-        trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+        trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) {
       return false;
     }
   }
@@ -1030,9 +1030,9 @@
   return
       // Success on lower if invariant (not set), or code can be generated.
       ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
-          GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+          GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) &&
       // And success on upper.
-      GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+      GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false);
 }
 
 bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index e5bc6ef..f6af384 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -252,24 +252,24 @@
 
   Value GetMin(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* trip) {
-    return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
+    return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true);
   }
 
   Value GetMax(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* trip) {
-    return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
+    return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false);
   }
 
   Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
+    return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min);
   }
 
   Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+    return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min);
   }
 
   Value GetRem(HInductionVarAnalysis::InductionInfo* info1,
@@ -701,7 +701,11 @@
 
 TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
   // We pass a bogus constant for the class to avoid mocking one.
-  HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
+  HInstruction* new_array = new (GetAllocator()) HNewArray(
+      /* cls= */ x_,
+      /* length= */ x_,
+      /* dex_pc= */ 0,
+      /* component_size_shift= */ 0);
   entry_block_->AddInstruction(new_array);
   HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
   entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3ba7414..96d6d2a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/logging.h"
 #include "builder.h"
 #include "class_linker.h"
 #include "class_root.h"
@@ -27,7 +28,6 @@
 #include "dex/inline_method_analyser.h"
 #include "dex/verification_results.h"
 #include "dex/verified_method.h"
-#include "driver/compiler_driver-inl.h"
 #include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
 #include "instruction_simplifier.h"
@@ -36,8 +36,9 @@
 #include "jit/jit_code_cache.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
 #include "nodes.h"
-#include "optimizing_compiler.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
 #include "scoped_thread_state_change-inl.h"
@@ -149,13 +150,13 @@
 
   // If we're compiling with a core image (which is only used for
   // test purposes), honor inlining directives in method names:
-  // - if a method's name contains the substring "$inline$", ensure
-  //   that this method is actually inlined;
   // - if a method's name contains the substring "$noinline$", do not
-  //   inline that method.
+  //   inline that method;
+  // - if a method's name contains the substring "$inline$", ensure
+  //   that this method is actually inlined.
   // We limit the latter to AOT compilation, as the JIT may or may not inline
   // depending on the state of classes at runtime.
-  const bool honor_noinline_directives = IsCompilingWithCoreImage();
+  const bool honor_noinline_directives = codegen_->GetCompilerOptions().CompilingWithCoreImage();
   const bool honor_inline_directives =
       honor_noinline_directives && Runtime::Current()->IsAotCompiler();
 
@@ -174,7 +175,7 @@
         if (honor_noinline_directives) {
           // Debugging case: directives in method names control or assert on inlining.
           std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
-              call->GetDexMethodIndex(), /* with_signature */ false);
+              call->GetDexMethodIndex(), /* with_signature= */ false);
           // Tests prevent inlining by having $noinline$ in their method names.
           if (callee_name.find("$noinline$") == std::string::npos) {
             if (TryInline(call)) {
@@ -406,7 +407,7 @@
   return single_impl;
 }
 
-static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!method->GetDeclaringClass()->IsVerified()) {
     if (Runtime::Current()->UseJitCompilation()) {
@@ -415,8 +416,9 @@
       return true;
     }
     uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
-    if (!compiler_driver->IsMethodVerifiedWithoutFailures(
-        method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
+    if (!compiler_options.IsMethodVerifiedWithoutFailures(method->GetDexMethodIndex(),
+                                                          class_def_idx,
+                                                          *method->GetDexFile())) {
       // Method has soft or hard failures, don't analyze.
       return true;
     }
@@ -424,11 +426,11 @@
   return false;
 }
 
-static bool AlwaysThrows(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool AlwaysThrows(const CompilerOptions& compiler_options, ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(method != nullptr);
   // Skip non-compilable and unverified methods.
-  if (!method->IsCompilable() || IsMethodUnverified(compiler_driver, method)) {
+  if (!method->IsCompilable() || IsMethodUnverified(compiler_options, method)) {
     return false;
   }
   // Skip native methods, methods with try blocks, and methods that are too large.
@@ -502,7 +504,7 @@
     bool result = TryInlineAndReplace(invoke_instruction,
                                       actual_method,
                                       ReferenceTypeInfo::CreateInvalid(),
-                                      /* do_rtp */ true,
+                                      /* do_rtp= */ true,
                                       cha_devirtualize);
     if (result) {
       // Successfully inlined.
@@ -516,7 +518,7 @@
           MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
         }
       }
-    } else if (!cha_devirtualize && AlwaysThrows(compiler_driver_, actual_method)) {
+    } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
       // Set always throws property for non-inlined method call with single target
       // (unless it was obtained through CHA, because that would imply we have
       // to add the CHA dependency, which seems not worth it).
@@ -678,7 +680,7 @@
     /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(Runtime::Current()->IsAotCompiler());
-  const ProfileCompilationInfo* pci = compiler_driver_->GetProfileCompilationInfo();
+  const ProfileCompilationInfo* pci = codegen_->GetCompilerOptions().GetProfileCompilationInfo();
   if (pci == nullptr) {
     return kInlineCacheNoData;
   }
@@ -856,9 +858,9 @@
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
   if (!TryInlineAndReplace(invoke_instruction,
                            resolved_method,
-                           ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
-                           /* do_rtp */ false,
-                           /* cha_devirtualize */ false)) {
+                           ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true),
+                           /* do_rtp= */ false,
+                           /* cha_devirtualize= */ false)) {
     return false;
   }
 
@@ -869,7 +871,7 @@
                class_index,
                monomorphic_type,
                invoke_instruction,
-               /* with_deoptimization */ true);
+               /* with_deoptimization= */ true);
 
   // Run type propagation to get the guard typed, and eventually propagate the
   // type of the receiver.
@@ -877,7 +879,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
 
   MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
@@ -947,7 +949,7 @@
                                                                    klass,
                                                                    is_referrer,
                                                                    invoke_instruction->GetDexPc(),
-                                                                   /* needs_access_check */ false);
+                                                                   /* needs_access_check= */ false);
   HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
       load_class, codegen_, caller_compilation_unit_);
   DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -1025,7 +1027,7 @@
     if (!class_index.IsValid() ||
         !TryBuildAndInline(invoke_instruction,
                            method,
-                           ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+                           ReferenceTypeInfo::Create(handle, /* is_exact= */ true),
                            &return_replacement)) {
       all_targets_inlined = false;
     } else {
@@ -1077,7 +1079,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
   return true;
 }
@@ -1148,14 +1150,14 @@
 
 
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      then, original_invoke_block, /* replace_if_back_edge */ false);
+      then, original_invoke_block, /* replace_if_back_edge= */ false);
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+      otherwise, original_invoke_block, /* replace_if_back_edge= */ false);
 
   // In case the original invoke location was a back edge, we need to update
   // the loop to now have the merge block as a back edge.
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      merge, original_invoke_block, /* replace_if_back_edge */ true);
+      merge, original_invoke_block, /* replace_if_back_edge= */ true);
 }
 
 bool HInliner::TryInlinePolymorphicCallToSameTarget(
@@ -1273,7 +1275,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
 
   MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
@@ -1296,9 +1298,7 @@
 
   // If invoke_instruction is devirtualized to a different method, give intrinsics
   // another chance before we try to inline it.
-  bool wrong_invoke_type = false;
-  if (invoke_instruction->GetResolvedMethod() != method &&
-      IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+  if (invoke_instruction->GetResolvedMethod() != method && method->IsIntrinsic()) {
     MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
     if (invoke_instruction->IsInvokeInterface()) {
       // We don't intrinsify an invoke-interface directly.
@@ -1311,6 +1311,7 @@
           invoke_instruction->GetDexMethodIndex(),  // Use interface method's dex method index.
           method,
           method->GetMethodIndex());
+      DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
       HInputsRef inputs = invoke_instruction->GetInputs();
       for (size_t index = 0; index != inputs.size(); ++index) {
         new_invoke->SetArgumentAt(index, inputs[index]);
@@ -1320,14 +1321,11 @@
       if (invoke_instruction->GetType() == DataType::Type::kReference) {
         new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
       }
-      // Run intrinsic recognizer again to set new_invoke's intrinsic.
-      IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
-      DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
       return_replacement = new_invoke;
       // invoke_instruction is replaced with new_invoke.
       should_remove_invoke_instruction = true;
     } else {
-      // invoke_instruction is intrinsified and stays.
+      invoke_instruction->SetResolvedMethod(method);
     }
   } else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
     if (invoke_instruction->IsInvokeInterface()) {
@@ -1401,7 +1399,7 @@
                              outer_compilation_unit_.GetClassLoader(),
                              outer_compilation_unit_.GetDexCache(),
                              handles_,
-                             /* is_first_run */ false).Run();
+                             /* is_first_run= */ false).Run();
   }
   return true;
 }
@@ -1421,10 +1419,6 @@
 static inline bool MayInline(const CompilerOptions& compiler_options,
                              const DexFile& inlined_from,
                              const DexFile& inlined_into) {
-  if (kIsTargetBuild) {
-    return true;
-  }
-
   // We're not allowed to inline across dex files if we're the no-inline-from dex file.
   if (!IsSameDexFile(inlined_from, inlined_into) &&
       ContainsElement(compiler_options.GetNoInlineFromDexFile(), &inlined_from)) {
@@ -1506,7 +1500,7 @@
     return false;
   }
 
-  if (IsMethodUnverified(compiler_driver_, method)) {
+  if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
     LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
         << "Method " << method->PrettyMethod()
         << " couldn't be verified, so it cannot be inlined";
@@ -1631,7 +1625,8 @@
                                  [](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
 
       // Create HInstanceFieldSet for each IPUT that stores non-zero data.
-      HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+      HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction,
+                                                        /* arg_vreg_index= */ 0u);
       bool needs_constructor_barrier = false;
       for (size_t i = 0; i != number_of_iputs; ++i) {
         HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
@@ -1649,7 +1644,7 @@
         }
       }
       if (needs_constructor_barrier) {
-        // See CompilerDriver::RequiresConstructorBarrier for more details.
+        // See DexCompilationUnit::RequiresConstructorBarrier for more details.
         DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
 
         HConstructorFence* constructor_fence =
@@ -1673,7 +1668,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtField* resolved_field =
-      class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+      class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
   DCHECK(resolved_field != nullptr);
   HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
       obj,
@@ -1686,7 +1681,7 @@
       *referrer->GetDexFile(),
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
-      /* dex_pc */ 0);
+      /* dex_pc= */ 0);
   if (iget->GetType() == DataType::Type::kReference) {
     // Use the same dex_cache that we used for field lookup as the hint_dex_cache.
     Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
@@ -1694,7 +1689,7 @@
                                  outer_compilation_unit_.GetClassLoader(),
                                  dex_cache,
                                  handles_,
-                                 /* is_first_run */ false);
+                                 /* is_first_run= */ false);
     rtp.Visit(iget);
   }
   return iget;
@@ -1708,7 +1703,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtField* resolved_field =
-      class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+      class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
   DCHECK(resolved_field != nullptr);
   if (is_final != nullptr) {
     // This information is needed only for constructors.
@@ -1727,7 +1722,7 @@
       *referrer->GetDexFile(),
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
-      /* dex_pc */ 0);
+      /* dex_pc= */ 0);
   return iput;
 }
 
@@ -1739,6 +1734,21 @@
   return (object != hint.Get()) ? handles->NewHandle(object) : hint;
 }
 
+static bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (!Runtime::Current()->IsAotCompiler()) {
+    // JIT can always encode methods in stack maps.
+    return true;
+  }
+  if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+    return true;
+  }
+  // TODO(ngeoffray): Support more AOT cases for inlining:
+  // - methods in multidex
+  // - methods in boot image for on-device non-PIC compilation.
+  return false;
+}
+
 bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                        ArtMethod* resolved_method,
                                        ReferenceTypeInfo receiver_type,
@@ -1746,7 +1756,7 @@
                                        HInstruction** return_replacement) {
   DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
   ScopedObjectAccess soa(Thread::Current());
-  const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+  const dex::CodeItem* code_item = resolved_method->GetCodeItem();
   const DexFile& callee_dex_file = *resolved_method->GetDexFile();
   uint32_t method_index = resolved_method->GetDexMethodIndex();
   CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
@@ -1759,6 +1769,7 @@
                            caller_compilation_unit_.GetClassLoader(),
                            handles_);
 
+  Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
   DexCompilationUnit dex_compilation_unit(
       class_loader,
       class_linker,
@@ -1767,8 +1778,9 @@
       resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
       method_index,
       resolved_method->GetAccessFlags(),
-      /* verified_method */ nullptr,
-      dex_cache);
+      /* verified_method= */ nullptr,
+      dex_cache,
+      compiling_class);
 
   InvokeType invoke_type = invoke_instruction->GetInvokeType();
   if (invoke_type == kInterface) {
@@ -1777,6 +1789,14 @@
     invoke_type = kVirtual;
   }
 
+  bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
+  const dex::ClassDef& callee_class = resolved_method->GetClassDef();
+  // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+  // is currently rarely true.
+  bool callee_dead_reference_safe =
+      annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
+      && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
+
   const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
   HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
       graph_->GetAllocator(),
@@ -1785,8 +1805,9 @@
       method_index,
       codegen_->GetCompilerOptions().GetInstructionSet(),
       invoke_type,
+      callee_dead_reference_safe,
       graph_->IsDebuggable(),
-      /* osr */ false,
+      /* osr= */ false,
       caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
 
@@ -1807,7 +1828,6 @@
                         code_item_accessor,
                         &dex_compilation_unit,
                         &outer_compilation_unit_,
-                        compiler_driver_,
                         codegen_,
                         inline_stats_,
                         resolved_method->GetQuickenedInfo(),
@@ -1868,7 +1888,7 @@
                              outer_compilation_unit_.GetClassLoader(),
                              dex_compilation_unit.GetDexCache(),
                              handles_,
-                             /* is_first_run */ false).Run();
+                             /* is_first_run= */ false).Run();
   }
 
   RunOptimizations(callee_graph, code_item, dex_compilation_unit);
@@ -2012,23 +2032,26 @@
     inline_stats_->AddTo(stats_);
   }
 
+  if (caller_dead_reference_safe && !callee_dead_reference_safe) {
+    // Caller was dead reference safe, but is not anymore, since we inlined dead
+    // reference unsafe code. Prior transformations remain valid, since they did not
+    // affect the inlined code.
+    graph_->MarkDeadReferenceUnsafe();
+  }
+
   return true;
 }
 
 void HInliner::RunOptimizations(HGraph* callee_graph,
-                                const DexFile::CodeItem* code_item,
+                                const dex::CodeItem* code_item,
                                 const DexCompilationUnit& dex_compilation_unit) {
   // Note: if the outermost_graph_ is being compiled OSR, we should not run any
   // optimization that could lead to a HDeoptimize. The following optimizations do not.
   HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
   HConstantFolding fold(callee_graph, "constant_folding$inliner");
-  HSharpening sharpening(callee_graph, codegen_);
   InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
-  IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
 
   HOptimization* optimizations[] = {
-    &intrinsics,
-    &sharpening,
     &simplify,
     &fold,
     &dce,
@@ -2063,7 +2086,6 @@
                    codegen_,
                    outer_compilation_unit_,
                    dex_compilation_unit,
-                   compiler_driver_,
                    handles_,
                    inline_stats_,
                    total_number_of_dex_registers_ + accessor.RegistersSize(),
@@ -2097,7 +2119,7 @@
   // is more specific than the class which declares the method.
   if (!resolved_method->IsStatic()) {
     if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()),
-                                  /* declared_can_be_null */ false,
+                                  /* declared_can_be_null= */ false,
                                   invoke_instruction->InputAt(0u))) {
       return true;
     }
@@ -2106,7 +2128,7 @@
   // Iterate over the list of parameter types and test whether any of the
   // actual inputs has a more specific reference type than the type declared in
   // the signature.
-  const DexFile::TypeList* param_list = resolved_method->GetParameterTypeList();
+  const dex::TypeList* param_list = resolved_method->GetParameterTypeList();
   for (size_t param_idx = 0,
               input_idx = resolved_method->IsStatic() ? 0 : 1,
               e = (param_list == nullptr ? 0 : param_list->Size());
@@ -2117,7 +2139,7 @@
       ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex(
           param_list->GetTypeItem(param_idx).type_idx_);
       if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
-                                    /* declared_can_be_null */ true,
+                                    /* declared_can_be_null= */ true,
                                     input)) {
         return true;
       }
@@ -2134,7 +2156,7 @@
     if (return_replacement->GetType() == DataType::Type::kReference) {
       // Test if the return type is a refinement of the declared return type.
       if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(),
-                                    /* declared_can_be_null */ true,
+                                    /* declared_can_be_null= */ true,
                                     return_replacement)) {
         return true;
       } else if (return_replacement->IsInstanceFieldGet()) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 6fd0c20..efd4c74 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,6 @@
            CodeGenerator* codegen,
            const DexCompilationUnit& outer_compilation_unit,
            const DexCompilationUnit& caller_compilation_unit,
-           CompilerDriver* compiler_driver,
            VariableSizedHandleScope* handles,
            OptimizingCompilerStats* stats,
            size_t total_number_of_dex_registers,
@@ -51,7 +50,6 @@
         outer_compilation_unit_(outer_compilation_unit),
         caller_compilation_unit_(caller_compilation_unit),
         codegen_(codegen),
-        compiler_driver_(compiler_driver),
         total_number_of_dex_registers_(total_number_of_dex_registers),
         total_number_of_instructions_(total_number_of_instructions),
         parent_(parent),
@@ -101,7 +99,7 @@
 
   // Run simple optimizations on `callee_graph`.
   void RunOptimizations(HGraph* callee_graph,
-                        const DexFile::CodeItem* code_item,
+                        const dex::CodeItem* code_item,
                         const DexCompilationUnit& dex_compilation_unit)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -280,7 +278,6 @@
   const DexCompilationUnit& outer_compilation_unit_;
   const DexCompilationUnit& caller_compilation_unit_;
   CodeGenerator* const codegen_;
-  CompilerDriver* const compiler_driver_;
   const size_t total_number_of_dex_registers_;
   size_t total_number_of_instructions_;
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e555d0d..5e7b575 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -19,12 +19,13 @@
 #include "art_method-inl.h"
 #include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
+#include "base/logging.h"
 #include "block_builder.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "code_generator.h"
 #include "data_type-inl.h"
 #include "dex/bytecode_utils.h"
 #include "dex/dex_instruction-inl.h"
-#include "driver/compiler_driver-inl.h"
 #include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
@@ -47,7 +48,6 @@
                                          DataType::Type return_type,
                                          const DexCompilationUnit* dex_compilation_unit,
                                          const DexCompilationUnit* outer_compilation_unit,
-                                         CompilerDriver* compiler_driver,
                                          CodeGenerator* code_generator,
                                          ArrayRef<const uint8_t> interpreter_metadata,
                                          OptimizingCompilerStats* compiler_stats,
@@ -61,7 +61,6 @@
       return_type_(return_type),
       block_builder_(block_builder),
       ssa_builder_(ssa_builder),
-      compiler_driver_(compiler_driver),
       code_generator_(code_generator),
       dex_compilation_unit_(dex_compilation_unit),
       outer_compilation_unit_(outer_compilation_unit),
@@ -73,7 +72,8 @@
       current_locals_(nullptr),
       latest_result_(nullptr),
       current_this_parameter_(nullptr),
-      loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
+      loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+      class_cache_(std::less<dex::TypeIndex>(), local_allocator->Adapter(kArenaAllocGraphBuilder)) {
   loop_headers_.reserve(kDefaultNumberOfLoops);
 }
 
@@ -319,8 +319,8 @@
   // Find locations where we want to generate extra stackmaps for native debugging.
   // This allows us to generate the info only at interesting points (for example,
   // at start of java statement) rather than before every dex instruction.
-  const bool native_debuggable = compiler_driver_ != nullptr &&
-                                 compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
+  const bool native_debuggable = code_generator_ != nullptr &&
+                                 code_generator_->GetCompilerOptions().GetNativeDebuggable();
   ArenaBitVector* native_debug_info_locations = nullptr;
   if (native_debuggable) {
     native_debug_info_locations = FindNativeDebugInfoLocations();
@@ -434,7 +434,7 @@
   HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
       HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
       HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-      /* method_load_data */ 0u
+      /* method_load_data= */ 0u
   };
   InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect;
   HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect(
@@ -449,7 +449,7 @@
       target_method,
       HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
   RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
-  HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
+  HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false);
 
   // Add the return instruction.
   if (return_type_ == DataType::Type::kVoid) {
@@ -466,22 +466,17 @@
 }
 
 ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
-  // The callback gets called when the line number changes.
-  // In other words, it marks the start of new java statement.
-  struct Callback {
-    static bool Position(void* ctx, const DexFile::PositionInfo& entry) {
-      static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_);
-      return false;
-    }
-  };
   ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
                                                      code_item_accessor_.InsnsSizeInCodeUnits(),
-                                                     /* expandable */ false,
+                                                     /* expandable= */ false,
                                                      kArenaAllocGraphBuilder);
   locations->ClearAllBits();
-  dex_file_->DecodeDebugPositionInfo(code_item_accessor_.DebugInfoOffset(),
-                                     Callback::Position,
-                                     locations);
+  // The visitor gets called when the line number changes.
+  // In other words, it marks the start of a new Java statement.
+  code_item_accessor_.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+    locations->SetBit(entry.address_);
+    return false;
+  });
   // Instruction-specific tweaks.
   for (const DexInstructionPcPair& inst : code_item_accessor_) {
     switch (inst->Opcode()) {
@@ -564,7 +559,7 @@
   uint16_t locals_index = graph_->GetNumberOfLocalVRegs();
   uint16_t parameter_index = 0;
 
-  const DexFile::MethodId& referrer_method_id =
+  const dex::MethodId& referrer_method_id =
       dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
   if (!dex_compilation_unit_->IsStatic()) {
     // Add the implicit 'this' argument, not expressed in the signature.
@@ -572,7 +567,7 @@
                                                               referrer_method_id.class_idx_,
                                                               parameter_index++,
                                                               DataType::Type::kReference,
-                                                              /* is_this */ true);
+                                                              /* is_this= */ true);
     AppendInstruction(parameter);
     UpdateLocal(locals_index++, parameter);
     number_of_parameters--;
@@ -581,15 +576,15 @@
     DCHECK(current_this_parameter_ == nullptr);
   }
 
-  const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
-  const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+  const dex::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+  const dex::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
   for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
     HParameterValue* parameter = new (allocator_) HParameterValue(
         *dex_file_,
         arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
         parameter_index++,
         DataType::FromShorty(shorty[shorty_pos]),
-        /* is_this */ false);
+        /* is_this= */ false);
     ++shorty_pos;
     AppendInstruction(parameter);
     // Store the parameter value in the local that the dex code will use
@@ -714,20 +709,18 @@
 
 // Does the method being compiled need any constructor barriers being inserted?
 // (Always 'false' for methods that aren't <init>.)
-static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu) {
   // Can be null in unit tests only.
   if (UNLIKELY(cu == nullptr)) {
     return false;
   }
 
-  Thread* self = Thread::Current();
-  return cu->IsConstructor()
-      && !cu->IsStatic()
-      // RequiresConstructorBarrier must only be queried for <init> methods;
-      // it's effectively "false" for every other method.
-      //
-      // See CompilerDriver::RequiresConstructBarrier for more explanation.
-      && driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+  // Constructor barriers are applicable only for <init> methods.
+  if (LIKELY(!cu->IsConstructor() || cu->IsStatic())) {
+    return false;
+  }
+
+  return cu->RequiresConstructorBarrier();
 }
 
 // Returns true if `block` has only one successor which starts at the next
@@ -773,7 +766,7 @@
     // Only <init> (which is a return-void) could possibly have a constructor fence.
     // This may insert additional redundant constructor fences from the super constructors.
     // TODO: remove redundant constructor fences (b/36656456).
-    if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
+    if (RequiresConstructorBarrier(dex_compilation_unit_)) {
       // Compiling instance constructor.
       DCHECK_STREQ("<init>", graph_->GetMethodName());
 
@@ -787,7 +780,7 @@
     }
     AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
   } else {
-    DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
+    DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_));
     HInstruction* value = LoadLocal(instruction.VRegA(), type);
     AppendInstruction(new (allocator_) HReturn(value, dex_pc));
   }
@@ -854,7 +847,7 @@
   // make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
   // which require runtime handling.
   if (invoke_type == kSuper) {
-    ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+    ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
     if (compiling_class == nullptr) {
       // We could not determine the method's class we need to wait until runtime.
       DCHECK(Runtime::Current()->IsAotCompiler());
@@ -884,8 +877,8 @@
       // The back-end code generator relies on this check in order to ensure that it will not
       // attempt to read the dex_cache with a dex_method_index that is not from the correct
       // dex_file. If we didn't do this check then the dex_method_index will not be updated in the
-      // builder, which means that the code-generator (and compiler driver during sharpening and
-      // inliner, maybe) might invoke an incorrect method.
+      // builder, which means that the code-generator (and sharpening and inliner, maybe)
+      // might invoke an incorrect method.
       // TODO: The actual method could still be referenced in the current dex file, so we
       //       could try locating it.
       // TODO: Remove the dex_file restriction.
@@ -933,7 +926,7 @@
                                                          dex_pc,
                                                          method_idx,
                                                          invoke_type);
-    return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
+    return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
   }
 
   // Replace calls to String.<init> with StringFactory.
@@ -952,10 +945,10 @@
     HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
         allocator_,
         number_of_arguments - 1,
-        DataType::Type::kReference /*return_type */,
+        /* return_type= */ DataType::Type::kReference,
         dex_pc,
         method_idx,
-        nullptr /* resolved_method */,
+        /* resolved_method= */ nullptr,
         dispatch_info,
         invoke_type,
         target_method,
@@ -974,7 +967,7 @@
     ScopedObjectAccess soa(Thread::Current());
     if (invoke_type == kStatic) {
       clinit_check =
-          ProcessClinitCheckForInvoke(soa, dex_pc, resolved_method, &clinit_check_requirement);
+          ProcessClinitCheckForInvoke(dex_pc, resolved_method, &clinit_check_requirement);
     } else if (invoke_type == kSuper) {
       if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
         // Update the method index to the one resolved. Note that this may be a no-op if
@@ -983,11 +976,8 @@
       }
     }
 
-    HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
-        HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
-        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-        0u
-    };
+    HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+        HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
     MethodReference target_method(resolved_method->GetDexFile(),
                                   resolved_method->GetDexMethodIndex());
     invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
@@ -1020,7 +1010,7 @@
                                                resolved_method,
                                                ImTable::GetImtIndex(resolved_method));
   }
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
 }
 
 bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
@@ -1036,7 +1026,7 @@
                                                         return_type,
                                                         dex_pc,
                                                         method_idx);
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
 }
 
 
@@ -1052,7 +1042,7 @@
                                                    call_site_idx,
                                                    return_type,
                                                    dex_pc);
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
 }
 
 HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1063,7 +1053,7 @@
   HInstruction* cls = load_class;
   Handle<mirror::Class> klass = load_class->GetClass();
 
-  if (!IsInitialized(soa, klass)) {
+  if (!IsInitialized(klass)) {
     cls = new (allocator_) HClinitCheck(load_class, dex_pc);
     AppendInstruction(cls);
   }
@@ -1292,7 +1282,7 @@
   return true;
 }
 
-bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const {
+bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
   if (cls == nullptr) {
     return false;
   }
@@ -1307,37 +1297,33 @@
     }
     // Assume loaded only if klass is in the boot image. App classes cannot be assumed
     // loaded because we don't even know what class loader will be used to load them.
-    if (IsInBootImage(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+    if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
       return true;
     }
   }
 
-  // We can avoid the class initialization check for `cls` in static methods in the
-  // very same class. Instance methods of the same class can run on an escaped instance
+  // We can avoid the class initialization check for `cls` in static methods and constructors
+  // in the very same class; invoking a static method involves a class initialization check
+  // and so does the instance allocation that must be executed before invoking a constructor.
+  // Other instance methods of the same class can run on an escaped instance
   // of an erroneous class. Even a superclass may need to be checked as the subclass
   // can be completely initialized while the superclass is initializing and the subclass
   // remains initialized when the superclass initializer throws afterwards. b/62478025
   // Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
-  ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
-  bool is_static = (dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u;
-  if (is_static && outermost_cls == cls.Get()) {
+  auto is_static_method_or_constructor_of_cls = [cls](const DexCompilationUnit& compilation_unit)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return (compilation_unit.GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u &&
+           compilation_unit.GetCompilingClass().Get() == cls.Get();
+  };
+  if (is_static_method_or_constructor_of_cls(*outer_compilation_unit_) ||
+      // Check also the innermost method. Though excessive copies of ClinitCheck can be
+      // eliminated by GVN, that happens only after the decision whether to inline the
+      // graph or not and that may depend on the presence of the ClinitCheck.
+      // TODO: We should walk over the entire inlined method chain, but we don't pass that
+      // information to the builder.
+      is_static_method_or_constructor_of_cls(*dex_compilation_unit_)) {
     return true;
   }
-  // Remember if the compiled class is a subclass of `cls`. By the time this is used
-  // below the `outermost_cls` may be invalidated by calling ResolveCompilingClass().
-  bool is_subclass = IsSubClass(outermost_cls, cls.Get());
-  if (dex_compilation_unit_ != outer_compilation_unit_) {
-    // Check also the innermost method. Though excessive copies of ClinitCheck can be
-    // eliminated by GVN, that happens only after the decision whether to inline the
-    // graph or not and that may depend on the presence of the ClinitCheck.
-    // TODO: We should walk over the entire inlined method chain, but we don't pass that
-    // information to the builder.
-    ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
-    if (is_static && innermost_cls == cls.Get()) {
-      return true;
-    }
-    is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
-  }
 
   // Otherwise, we may be able to avoid the check if `cls` is a superclass of a method being
   // compiled here (anywhere in the inlining chain) as the `cls` must have started initializing
@@ -1358,7 +1344,12 @@
   // TODO: We should walk over the entire inlined methods chain, but we don't pass that
   // information to the builder. (We could also check if we're guaranteed a non-null instance
   // of `cls` at this location but that's outside the scope of the instruction builder.)
-  if (is_subclass && HasTrivialInitialization(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+  bool is_subclass = IsSubClass(outer_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+  if (dex_compilation_unit_ != outer_compilation_unit_) {
+    is_subclass = is_subclass ||
+                  IsSubClass(dex_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+  }
+  if (is_subclass && HasTrivialInitialization(cls.Get(), code_generator_->GetCompilerOptions())) {
     return true;
   }
 
@@ -1366,22 +1357,20 @@
 }
 
 HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
-    ScopedObjectAccess& soa,
     uint32_t dex_pc,
     ArtMethod* resolved_method,
     HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
   Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
 
   HClinitCheck* clinit_check = nullptr;
-  if (IsInitialized(soa, klass)) {
+  if (IsInitialized(klass)) {
     *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
   } else {
-    HLoadClass* cls = BuildLoadClass(soa,
-                                     klass->GetDexTypeIndex(),
+    HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
                                      klass->GetDexFile(),
                                      klass,
                                      dex_pc,
-                                     /* needs_access_check */ false);
+                                     /* needs_access_check= */ false);
     if (cls != nullptr) {
       *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
       clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
@@ -1505,27 +1494,28 @@
   // to be visited once it is clear whether it has remaining uses.
   if (arg_this->IsNewInstance()) {
     ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
-    // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
-    for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
-      if ((*current_locals_)[vreg] == arg_this) {
-        (*current_locals_)[vreg] = invoke;
-      }
-    }
   } else {
     DCHECK(arg_this->IsPhi());
     // We can get a phi as input of a String.<init> if there is a loop between the
     // allocation and the String.<init> call. As we don't know which other phis might alias
-    // with `arg_this`, we keep a record of these phis and will analyze their inputs and
-    // uses once the inputs and users are populated (in ssa_builder.cc).
-    // Note: we only do this for phis, as it is a somewhat more expensive operation than
-    // what we're doing above when the input is the `HNewInstance`.
-    ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
+    // with `arg_this`, we keep a record of those invocations so we can later replace
+    // the allocation with the invocation.
+    // Add the actual 'this' input so the analysis knows what is the allocation instruction.
+    // The input will be removed during the analysis.
+    invoke->AddInput(arg_this);
+    ssa_builder_->AddUninitializedStringPhi(invoke);
+  }
+  // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+  for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+    if ((*current_locals_)[vreg] == arg_this) {
+      (*current_locals_)[vreg] = invoke;
+    }
   }
   return true;
 }
 
 static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
   const char* type = dex_file.GetFieldTypeDescriptor(field_id);
   return DataType::FromShorty(type[0]);
 }
@@ -1549,7 +1539,7 @@
   }
 
   ScopedObjectAccess soa(Thread::Current());
-  ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
+  ArtField* resolved_field = ResolveField(field_index, /* is_static= */ false, is_put);
 
   // Generate an explicit null check on the reference, unless the field access
   // is unresolved. In that case, we rely on the runtime to perform various
@@ -1612,43 +1602,6 @@
   return true;
 }
 
-static ObjPtr<mirror::Class> ResolveClassFrom(ScopedObjectAccess& soa,
-                                              CompilerDriver* driver,
-                                              const DexCompilationUnit& compilation_unit)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
-  Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
-
-  return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveOutermostCompilingClass(
-    ScopedObjectAccess& soa) const {
-  return ResolveClassFrom(soa, compiler_driver_, *outer_compilation_unit_);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveCompilingClass(ScopedObjectAccess& soa) const {
-  return ResolveClassFrom(soa, compiler_driver_, *dex_compilation_unit_);
-}
-
-bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
-  Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
-      soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
-  Handle<mirror::Class> outer_class(hs.NewHandle(ResolveOutermostCompilingClass(soa)));
-
-  // GetOutermostCompilingClass returns null when the class is unresolved
-  // (e.g. if it derives from an unresolved class). This is bogus knowing that
-  // we are compiling it.
-  // When this happens we cannot establish a direct relation between the current
-  // class and the outer class, so we return false.
-  // (Note that this is only used for optimizing invokes and field accesses)
-  return (cls != nullptr) && (outer_class.Get() == cls.Get());
-}
-
 void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
                                                            uint32_t dex_pc,
                                                            bool is_put,
@@ -1668,18 +1621,17 @@
 
 ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<2> hs(soa.Self());
 
   ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
   Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
-  Handle<mirror::Class> compiling_class(hs.NewHandle(ResolveCompilingClass(soa)));
 
   ArtField* resolved_field = class_linker->ResolveField(field_idx,
                                                         dex_compilation_unit_->GetDexCache(),
                                                         class_loader,
                                                         is_static);
+  DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
   if (UNLIKELY(resolved_field == nullptr)) {
-    // Clean up any exception left by type resolution.
+    // Clean up any exception left by field resolution.
     soa.Self()->ClearException();
     return nullptr;
   }
@@ -1691,6 +1643,7 @@
   }
 
   // Check access.
+  Handle<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass();
   if (compiling_class == nullptr) {
     if (!resolved_field->IsPublic()) {
       return nullptr;
@@ -1720,7 +1673,7 @@
   uint16_t field_index = instruction.VRegB_21c();
 
   ScopedObjectAccess soa(Thread::Current());
-  ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
+  ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put);
 
   if (resolved_field == nullptr) {
     MaybeRecordStat(compilation_stats_,
@@ -1733,12 +1686,11 @@
   DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index);
 
   Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
-  HLoadClass* constant = BuildLoadClass(soa,
-                                        klass->GetDexTypeIndex(),
+  HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
                                         klass->GetDexFile(),
                                         klass,
                                         dex_pc,
-                                        /* needs_access_check */ false);
+                                        /* needs_access_check= */ false);
 
   if (constant == nullptr) {
     // The class cannot be referenced from this compiled code. Generate
@@ -1750,7 +1702,7 @@
   }
 
   HInstruction* cls = constant;
-  if (!IsInitialized(soa, klass)) {
+  if (!IsInitialized(klass)) {
     cls = new (allocator_) HClinitCheck(constant, dex_pc);
     AppendInstruction(cls);
   }
@@ -1849,15 +1801,27 @@
   graph_->SetHasBoundsChecks(true);
 }
 
+HNewArray* HInstructionBuilder::BuildNewArray(uint32_t dex_pc,
+                                              dex::TypeIndex type_index,
+                                              HInstruction* length) {
+  HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+
+  const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(type_index));
+  DCHECK_EQ(descriptor[0], '[');
+  size_t component_type_shift = Primitive::ComponentSizeShift(Primitive::GetType(descriptor[1]));
+
+  HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc, component_type_shift);
+  AppendInstruction(new_array);
+  return new_array;
+}
+
 HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
                                                     dex::TypeIndex type_index,
                                                     const InstructionOperands& operands) {
   const size_t number_of_operands = operands.GetNumberOfOperands();
   HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
-  HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
-  HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
-  AppendInstruction(object);
 
+  HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
   const char* descriptor = dex_file_->StringByTypeIdx(type_index);
   DCHECK_EQ(descriptor[0], '[') << descriptor;
   char primitive = descriptor[1];
@@ -1870,13 +1834,13 @@
   for (size_t i = 0; i < number_of_operands; ++i) {
     HInstruction* value = LoadLocal(operands.GetOperand(i), type);
     HInstruction* index = graph_->GetIntConstant(i, dex_pc);
-    HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
+    HArraySet* aset = new (allocator_) HArraySet(new_array, index, value, type, dex_pc);
     ssa_builder_->MaybeAddAmbiguousArraySet(aset);
     AppendInstruction(aset);
   }
-  latest_result_ = object;
+  latest_result_ = new_array;
 
-  return object;
+  return new_array;
 }
 
 template <typename T>
@@ -1979,12 +1943,11 @@
   ScopedObjectAccess soa(Thread::Current());
   const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
   Handle<mirror::Class> klass = ResolveClass(soa, type_index);
-  bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
-  return BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+  bool needs_access_check = LoadClassNeedsAccessCheck(klass);
+  return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
 }
 
-HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
-                                                dex::TypeIndex type_index,
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
                                                 const DexFile& dex_file,
                                                 Handle<mirror::Class> klass,
                                                 uint32_t dex_pc,
@@ -2001,11 +1964,8 @@
   }
 
   // Note: `klass` must be from `handles_`.
-  bool is_referrers_class = false;
-  if (klass != nullptr) {
-    ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
-    is_referrers_class = (outermost_cls == klass.Get());
-  }
+  bool is_referrers_class =
+      (klass != nullptr) && (outer_compilation_unit_->GetCompilingClass().Get() == klass.Get());
   HLoadClass* load_class = new (allocator_) HLoadClass(
       graph_->GetCurrentMethod(),
       type_index,
@@ -2031,22 +1991,28 @@
 
 Handle<mirror::Class> HInstructionBuilder::ResolveClass(ScopedObjectAccess& soa,
                                                         dex::TypeIndex type_index) {
-  Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
-  ObjPtr<mirror::Class> klass = compiler_driver_->ResolveClass(
-      soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_);
-  // TODO: Avoid creating excessive handles if the method references the same class repeatedly.
-  // (Use a map on the local_allocator_.)
-  return handles_->NewHandle(klass);
+  auto it = class_cache_.find(type_index);
+  if (it != class_cache_.end()) {
+    return it->second;
+  }
+
+  ObjPtr<mirror::Class> klass = dex_compilation_unit_->GetClassLinker()->ResolveType(
+      type_index, dex_compilation_unit_->GetDexCache(), dex_compilation_unit_->GetClassLoader());
+  DCHECK_EQ(klass == nullptr, soa.Self()->IsExceptionPending());
+  soa.Self()->ClearException();  // Clean up the exception left by type resolution if any.
+
+  Handle<mirror::Class> h_klass = handles_->NewHandle(klass);
+  class_cache_.Put(type_index, h_klass);
+  return h_klass;
 }
 
-bool HInstructionBuilder::LoadClassNeedsAccessCheck(ScopedObjectAccess& soa,
-                                                    Handle<mirror::Class> klass) {
+bool HInstructionBuilder::LoadClassNeedsAccessCheck(Handle<mirror::Class> klass) {
   if (klass == nullptr) {
     return true;
   } else if (klass->IsPublic()) {
     return false;
   } else {
-    ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+    ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
     return compiling_class == nullptr || !compiling_class->CanAccess(klass.Get());
   }
 }
@@ -2075,7 +2041,7 @@
   ScopedObjectAccess soa(Thread::Current());
   const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
   Handle<mirror::Class> klass = ResolveClass(soa, type_index);
-  bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
+  bool needs_access_check = LoadClassNeedsAccessCheck(klass);
   TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
       klass.Get(), code_generator_, needs_access_check);
 
@@ -2093,7 +2059,7 @@
     bitstring_path_to_root = graph_->GetIntConstant(static_cast<int32_t>(path_to_root), dex_pc);
     bitstring_mask = graph_->GetIntConstant(static_cast<int32_t>(mask), dex_pc);
   } else {
-    class_or_null = BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+    class_or_null = BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
   }
   DCHECK(class_or_null != nullptr);
 
@@ -2899,10 +2865,8 @@
     case Instruction::NEW_ARRAY: {
       dex::TypeIndex type_index(instruction.VRegC_22c());
       HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
-      HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+      HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
 
-      HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
-      AppendInstruction(new_array);
       UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
       BuildConstructorFenceForAllocation(new_array);
       break;
@@ -2982,7 +2946,7 @@
     case Instruction::IGET_CHAR_QUICK:
     case Instruction::IGET_SHORT:
     case Instruction::IGET_SHORT_QUICK: {
-      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) {
+      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) {
         return false;
       }
       break;
@@ -3002,7 +2966,7 @@
     case Instruction::IPUT_CHAR_QUICK:
     case Instruction::IPUT_SHORT:
     case Instruction::IPUT_SHORT_QUICK: {
-      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) {
+      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) {
         return false;
       }
       break;
@@ -3015,7 +2979,7 @@
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT: {
-      BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false);
+      BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false);
       break;
     }
 
@@ -3026,7 +2990,7 @@
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT: {
-      BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true);
+      BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true);
       break;
     }
 
@@ -3179,7 +3143,7 @@
 
 ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
   // TODO: Cache the result in a Handle<mirror::Class>.
-  const DexFile::MethodId& method_id =
+  const dex::MethodId& method_id =
       dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
   return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
 }
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index af1b86c..d701445 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -34,7 +34,6 @@
 class ArtField;
 class ArtMethod;
 class CodeGenerator;
-class CompilerDriver;
 class DexCompilationUnit;
 class HBasicBlockBuilder;
 class Instruction;
@@ -59,7 +58,6 @@
                       DataType::Type return_type,
                       const DexCompilationUnit* dex_compilation_unit,
                       const DexCompilationUnit* outer_compilation_unit,
-                      CompilerDriver* compiler_driver,
                       CodeGenerator* code_generator,
                       ArrayRef<const uint8_t> interpreter_metadata,
                       OptimizingCompilerStats* compiler_stats,
@@ -179,6 +177,9 @@
                          uint32_t call_site_idx,
                          const InstructionOperands& operands);
 
+  // Builds a new array node.
+  HNewArray* BuildNewArray(uint32_t dex_pc, dex::TypeIndex type_index, HInstruction* length);
+
   // Builds a new array node and the instructions that fill it.
   HNewArray* BuildFilledNewArray(uint32_t dex_pc,
                                  dex::TypeIndex type_index,
@@ -219,8 +220,7 @@
   // Builds a `HLoadClass` loading the given `type_index`.
   HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
 
-  HLoadClass* BuildLoadClass(ScopedObjectAccess& soa,
-                             dex::TypeIndex type_index,
+  HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
                              const DexFile& dex_file,
                              Handle<mirror::Class> klass,
                              uint32_t dex_pc,
@@ -230,7 +230,7 @@
   Handle<mirror::Class> ResolveClass(ScopedObjectAccess& soa, dex::TypeIndex type_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool LoadClassNeedsAccessCheck(ScopedObjectAccess& soa, Handle<mirror::Class> klass)
+  bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
@@ -239,17 +239,6 @@
   // Builds a `HLoadMethodType` loading the given `proto_index`.
   void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
 
-  // Returns the outer-most compiling method's class.
-  ObjPtr<mirror::Class> ResolveOutermostCompilingClass(ScopedObjectAccess& soa) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Returns the class whose method is being compiled.
-  ObjPtr<mirror::Class> ResolveCompilingClass(ScopedObjectAccess& soa) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Returns whether `type_index` points to the outer-most compiling method's class.
-  bool IsOutermostCompilingClass(dex::TypeIndex type_index) const;
-
   void PotentiallySimplifyFakeString(uint16_t original_dex_register,
                                      uint32_t dex_pc,
                                      HInvoke* invoke);
@@ -272,7 +261,6 @@
   void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
 
   HClinitCheck* ProcessClinitCheckForInvoke(
-      ScopedObjectAccess& soa,
       uint32_t dex_pc,
       ArtMethod* method,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
@@ -286,7 +274,7 @@
   void BuildConstructorFenceForAllocation(HInstruction* allocation);
 
   // Return whether the compiler can assume `cls` is initialized.
-  bool IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const
+  bool IsInitialized(Handle<mirror::Class> cls) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to resolve a method using the class linker. Return null if a method could
@@ -317,8 +305,6 @@
   HBasicBlockBuilder* const block_builder_;
   SsaBuilder* const ssa_builder_;
 
-  CompilerDriver* const compiler_driver_;
-
   CodeGenerator* const code_generator_;
 
   // The compilation unit of the current method being compiled. Note that
@@ -348,6 +334,10 @@
 
   ScopedArenaVector<HBasicBlock*> loop_headers_;
 
+  // Cached resolved types for the current compilation unit's DexFile.
+  // Handle<>s reference entries in the `handles_`.
+  ScopedArenaSafeMap<dex::TypeIndex, Handle<mirror::Class>> class_cache_;
+
   static constexpr int kDefaultNumberOfLoops = 2;
 
   DISALLOW_COPY_AND_ASSIGN(HInstructionBuilder);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index bb96c21..a433d7e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -372,7 +372,7 @@
       // (as defined by shift semantics). This ensures other
       // optimizations do not need to special case for such situations.
       DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32);
-      instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1);
+      instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1);
       RecordSimplification();
       return;
     }
@@ -749,8 +749,8 @@
       return new (allocator) HBelowOrEqual(rhs, lhs);
     default:
       LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
+      UNREACHABLE();
   }
-  return nullptr;
 }
 
 static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) {
@@ -1181,8 +1181,7 @@
   HInstruction* input = instruction->GetInput();
   DataType::Type input_type = input->GetType();
   DataType::Type result_type = instruction->GetResultType();
-  if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
-    // Remove the implicit conversion; this includes conversion to the same type.
+  if (instruction->IsImplicitConversion()) {
     instruction->ReplaceWith(input);
     instruction->GetBlock()->RemoveInstruction(instruction);
     RecordSimplification();
@@ -1317,7 +1316,7 @@
   }
 
   HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg();
-  if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) {
+  if (left_is_neg != right_is_neg && neg->HasOnlyOneNonEnvironmentUse()) {
     // Replace code looking like
     //    NEG tmp, b
     //    ADD dst, a, tmp
@@ -2290,7 +2289,7 @@
       // the invoke, as we would need to look it up in the current dex file, and it
       // is unlikely that it exists. The most usual situation for such typed
       // arraycopy methods is a direct pointer to the boot image.
-      HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+      invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(method, codegen_));
     }
   }
 }
@@ -2362,17 +2361,17 @@
   ArenaAllocator* allocator = GetGraph()->GetAllocator();
   // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
   // so create the HArrayLength, HBoundsCheck and HArrayGet.
-  HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
+  HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true);
   invoke->GetBlock()->InsertInstructionBefore(length, invoke);
   HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
-      index, length, dex_pc, /* is_string_char_at */ true);
+      index, length, dex_pc, /* is_string_char_at= */ true);
   invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
   HArrayGet* array_get = new (allocator) HArrayGet(str,
                                                    bounds_check,
                                                    DataType::Type::kUint16,
                                                    SideEffects::None(),  // Strings are immutable.
                                                    dex_pc,
-                                                   /* is_string_char_at */ true);
+                                                   /* is_string_char_at= */ true);
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
   bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
   GetGraph()->SetHasBoundsChecks(true);
@@ -2384,7 +2383,7 @@
   // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
   // so create the HArrayLength.
   HArrayLength* length =
-      new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+      new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true);
   HInstruction* replacement;
   if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
     // For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -2535,28 +2534,28 @@
       SimplifySystemArrayCopy(instruction);
       break;
     case Intrinsics::kIntegerRotateRight:
-      SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32);
+      SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongRotateRight:
-      SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64);
+      SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerRotateLeft:
-      SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32);
+      SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongRotateLeft:
-      SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64);
+      SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerCompare:
-      SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32);
+      SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongCompare:
-      SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64);
+      SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerSignum:
-      SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32);
+      SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongSignum:
-      SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64);
+      SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64);
       break;
     case Intrinsics::kFloatIsNaN:
     case Intrinsics::kDoubleIsNaN:
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 24fbb6c..01e9cff 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -43,11 +43,11 @@
   bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
   bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
   bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
   }
   bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
     DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
   }
 
   /**
@@ -202,6 +202,11 @@
     return;
   }
 
+  // TODO: Support intermediate address for object arrays on arm.
+  if (type == DataType::Type::kReference) {
+    return;
+  }
+
   if (type == DataType::Type::kInt64
       || type == DataType::Type::kFloat32
       || type == DataType::Type::kFloat64) {
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index b536cb4..e23decb 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -45,11 +45,11 @@
                                   HInstruction* bitfield_op,
                                   bool do_merge);
   bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
   }
   bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
     DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
   }
 
   /**
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index ccdcb35..0f30f66 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -245,11 +245,11 @@
     return false;
   }
   if (kEmitCompilerReadBarrier &&
+      !kUseBakerReadBarrier &&
       access->IsArrayGet() &&
       access->GetType() == DataType::Type::kReference) {
-    // For object arrays, the read barrier instrumentation requires
+    // For object arrays, the non-Baker read barrier instrumentation requires
     // the original array pointer.
-    // TODO: This can be relaxed for Baker CC.
     return false;
   }
 
diff --git a/compiler/optimizing/instruction_simplifier_x86.cc b/compiler/optimizing/instruction_simplifier_x86.cc
new file mode 100644
index 0000000..2d8f94a
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.cc
@@ -0,0 +1,88 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86.h"
+
+namespace art {
+
+namespace x86 {
+
+class InstructionSimplifierX86Visitor : public HGraphVisitor {
+ public:
+  InstructionSimplifierX86Visitor(HGraph* graph,
+                                  CodeGenerator* codegen,
+                                  OptimizingCompilerStats* stats)
+      : HGraphVisitor(graph),
+        codegen_(down_cast<CodeGeneratorX86*>(codegen)),
+        stats_(stats) {}
+
+  void RecordSimplification() {
+    MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+  }
+
+  bool HasAVX2() {
+    return (codegen_->GetInstructionSetFeatures().HasAVX2());
+  }
+
+  void VisitBasicBlock(HBasicBlock* block) override {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (instruction->IsInBlock()) {
+        instruction->Accept(this);
+      }
+    }
+  }
+
+  void VisitAnd(HAnd * instruction) override;
+  void VisitXor(HXor* instruction) override;
+
+ private:
+  CodeGeneratorX86* codegen_;
+  OptimizingCompilerStats* stats_;
+};
+
+
+void InstructionSimplifierX86Visitor::VisitAnd(HAnd* instruction) {
+  if (TryCombineAndNot(instruction)) {
+    RecordSimplification();
+  } else if (instruction->GetResultType() == DataType::Type::kInt32) {
+    if (TryGenerateResetLeastSetBit(instruction)) {
+      RecordSimplification();
+    }
+  }
+}
+
+void InstructionSimplifierX86Visitor::VisitXor(HXor* instruction) {
+  if (instruction->GetResultType() == DataType::Type::kInt32) {
+    if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+      RecordSimplification();
+    }
+  }
+}
+
+bool InstructionSimplifierX86::Run() {
+  InstructionSimplifierX86Visitor visitor(graph_, codegen_, stats_);
+  if (visitor.HasAVX2()) {
+    visitor.VisitReversePostOrder();
+    return true;
+  }
+  return false;
+}
+
+}  // namespace x86
+}  // namespace art
+
diff --git a/compiler/optimizing/instruction_simplifier_x86.h b/compiler/optimizing/instruction_simplifier_x86.h
new file mode 100644
index 0000000..6f10006
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.h
@@ -0,0 +1,44 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+namespace x86 {
+
+class InstructionSimplifierX86 : public HOptimization {
+ public:
+  InstructionSimplifierX86(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+      : HOptimization(graph, kInstructionSimplifierX86PassName, stats),
+        codegen_(codegen) {}
+
+  static constexpr const char* kInstructionSimplifierX86PassName = "instruction_simplifier_x86";
+
+  bool Run() override;
+
+ private:
+  CodeGenerator* codegen_;
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.cc b/compiler/optimizing/instruction_simplifier_x86_64.cc
new file mode 100644
index 0000000..56c6b41
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.cc
@@ -0,0 +1,82 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_64.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86_64.h"
+
+namespace art {
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64Visitor : public HGraphVisitor {
+ public:
+  InstructionSimplifierX86_64Visitor(HGraph* graph,
+                                     CodeGenerator* codegen,
+                                     OptimizingCompilerStats* stats)
+      : HGraphVisitor(graph),
+        codegen_(down_cast<CodeGeneratorX86_64*>(codegen)),
+        stats_(stats) {}
+
+  void RecordSimplification() {
+    MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+  }
+
+  bool HasAVX2() {
+    return codegen_->GetInstructionSetFeatures().HasAVX2();
+  }
+
+  void VisitBasicBlock(HBasicBlock* block) override {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (instruction->IsInBlock()) {
+        instruction->Accept(this);
+      }
+    }
+  }
+
+  void VisitAnd(HAnd* instruction) override;
+  void VisitXor(HXor* instruction) override;
+
+ private:
+  CodeGeneratorX86_64* codegen_;
+  OptimizingCompilerStats* stats_;
+};
+
+void InstructionSimplifierX86_64Visitor::VisitAnd(HAnd* instruction) {
+  if (TryCombineAndNot(instruction)) {
+    RecordSimplification();
+  } else if (TryGenerateResetLeastSetBit(instruction)) {
+    RecordSimplification();
+  }
+}
+
+
+void InstructionSimplifierX86_64Visitor::VisitXor(HXor* instruction) {
+  if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+    RecordSimplification();
+  }
+}
+
+bool InstructionSimplifierX86_64::Run() {
+  InstructionSimplifierX86_64Visitor visitor(graph_, codegen_, stats_);
+  if (visitor.HasAVX2()) {
+    visitor.VisitReversePostOrder();
+    return true;
+  }
+  return false;
+}
+}  // namespace x86_64
+}  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.h b/compiler/optimizing/instruction_simplifier_x86_64.h
new file mode 100644
index 0000000..6cae24d
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64 : public HOptimization {
+ public:
+  InstructionSimplifierX86_64(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+      : HOptimization(graph, kInstructionSimplifierX86_64PassName, stats),
+        codegen_(codegen) {}
+
+  static constexpr const char* kInstructionSimplifierX86_64PassName =
+      "instruction_simplifier_x86_64";
+
+  bool Run() override;
+
+ private:
+  CodeGenerator* codegen_;
+};
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.cc b/compiler/optimizing/instruction_simplifier_x86_shared.cc
new file mode 100644
index 0000000..2805abb
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.cc
@@ -0,0 +1,137 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_shared.h"
+#include "nodes_x86.h"
+
+namespace art {
+
+bool TryCombineAndNot(HAnd* instruction) {
+  DataType::Type type = instruction->GetType();
+  if (!DataType::IsIntOrLongType(type)) {
+    return false;
+  }
+  // Replace code looking like
+  //    Not tmp, y
+  //    And dst, x, tmp
+  //  with
+  //    AndNot dst, x, y
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+  // Perform simplification only when either left or right
+  // is Not. When both are Not, instruction should be simplified with
+  // DeMorgan's Laws.
+  if (left->IsNot() ^ right->IsNot()) {
+    bool left_is_not = left->IsNot();
+    HInstruction* other_ins = (left_is_not ? right : left);
+    HNot* not_ins = (left_is_not ? left : right)->AsNot();
+    // Only do the simplification if instruction has only one use
+    // and thus can be safely removed.
+    if (not_ins->HasOnlyOneNonEnvironmentUse()) {
+      ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+      HX86AndNot* and_not = new (arena) HX86AndNot(type,
+                                                   not_ins->GetInput(),
+                                                   other_ins,
+                                                   instruction->GetDexPc());
+      instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, and_not);
+      DCHECK(!not_ins->HasUses());
+      not_ins->GetBlock()->RemoveInstruction(not_ins);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool TryGenerateResetLeastSetBit(HAnd* instruction) {
+  DataType::Type type = instruction->GetType();
+  if (!DataType::IsIntOrLongType(type)) {
+    return false;
+  }
+  // Replace code looking like
+  //    Add tmp, x, -1 or Sub tmp, x, 1
+  //    And dest, x, tmp
+  //  with
+  //    MaskOrResetLeastSetBit dest, x
+  HInstruction* candidate = nullptr;
+  HInstruction* other = nullptr;
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+  if (AreLeastSetBitInputs(left, right)) {
+    candidate = left;
+    other = right;
+  } else if (AreLeastSetBitInputs(right, left)) {
+    candidate = right;
+    other = left;
+  }
+  if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+    ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+    HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+        type, HInstruction::kAnd, other, instruction->GetDexPc());
+    instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+    DCHECK(!candidate->HasUses());
+    candidate->GetBlock()->RemoveInstruction(candidate);
+    return true;
+  }
+  return false;
+}
+
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction) {
+  DataType::Type type = instruction->GetType();
+  if (!DataType::IsIntOrLongType(type)) {
+    return false;
+  }
+  // Replace code looking like
+  //    Add tmp, x, -1 or Sub tmp, x, 1
+  //    Xor dest, x, tmp
+  //  with
+  //    MaskOrResetLeastSetBit dest, x
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+  HInstruction* other = nullptr;
+  HInstruction* candidate = nullptr;
+  if (AreLeastSetBitInputs(left, right)) {
+    candidate = left;
+    other = right;
+  } else if (AreLeastSetBitInputs(right, left)) {
+    candidate = right;
+    other = left;
+  }
+  if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+    ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+    HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+        type, HInstruction::kXor, other, instruction->GetDexPc());
+    instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+    DCHECK(!candidate->HasUses());
+    candidate->GetBlock()->RemoveInstruction(candidate);
+    return true;
+  }
+  return false;
+}
+
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other) {
+  if (to_test->IsAdd()) {
+    HAdd* add = to_test->AsAdd();
+    HConstant* cst = add->GetConstantRight();
+    return cst != nullptr && cst->IsMinusOne() && other == add->GetLeastConstantLeft();
+  }
+  if (to_test->IsSub()) {
+    HSub* sub = to_test->AsSub();
+    HConstant* cst = sub->GetConstantRight();
+    return cst != nullptr && cst->IsOne() && other == sub->GetLeastConstantLeft();
+  }
+  return false;
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.h b/compiler/optimizing/instruction_simplifier_x86_shared.h
new file mode 100644
index 0000000..7f94d7e
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.h
@@ -0,0 +1,29 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
+#include "nodes.h"
+
+namespace art {
+bool TryCombineAndNot(HAnd* instruction);
+bool TryGenerateResetLeastSetBit(HAnd* instruction);
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction);
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other);
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 3c20ad6..c345624 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -21,6 +21,7 @@
 #include "class_root.h"
 #include "handle.h"
 #include "obj_ptr-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 
 namespace art {
@@ -29,7 +30,7 @@
                                                                       ClassLinker* class_linker)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
-      self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+      self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
   if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
     return nullptr;
   }
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 1407ea9..2de0f0c 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -33,179 +33,6 @@
 
 namespace art {
 
-// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
-#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  static_assert( \
-      static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
-      "Instrinsics enumeration space overflow.");
-#include "intrinsics_list.h"
-  INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
-#undef INTRINSICS_LIST
-#undef CHECK_INTRINSICS_ENUM_VALUES
-
-// Function that returns whether an intrinsic is static/direct or virtual.
-static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
-  switch (i) {
-    case Intrinsics::kNone:
-      return kInterface;  // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-    case Intrinsics::k ## Name: \
-      return IsStatic;
-#include "intrinsics_list.h"
-      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-  }
-  return kInterface;
-}
-
-// Function that returns whether an intrinsic needs an environment or not.
-static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCache(Intrinsics i) {
-  switch (i) {
-    case Intrinsics::kNone:
-      return kNeedsEnvironmentOrCache;  // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-    case Intrinsics::k ## Name: \
-      return NeedsEnvironmentOrCache;
-#include "intrinsics_list.h"
-      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-  }
-  return kNeedsEnvironmentOrCache;
-}
-
-// Function that returns whether an intrinsic has side effects.
-static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) {
-  switch (i) {
-    case Intrinsics::kNone:
-      return kAllSideEffects;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-    case Intrinsics::k ## Name: \
-      return SideEffects;
-#include "intrinsics_list.h"
-      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-  }
-  return kAllSideEffects;
-}
-
-// Function that returns whether an intrinsic can throw exceptions.
-static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
-  switch (i) {
-    case Intrinsics::kNone:
-      return kCanThrow;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-    case Intrinsics::k ## Name: \
-      return Exceptions;
-#include "intrinsics_list.h"
-      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-  }
-  return kCanThrow;
-}
-
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
-  //
-  // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
-  // failure occured. We might be in a situation where we have inlined a method that calls an
-  // intrinsic, but that method is in a different dex file on which we do not have a
-  // verified_method that would have helped the compiler driver sharpen the call. In that case,
-  // make sure that the intrinsic is actually for some final method (or in a final class), as
-  // otherwise the intrinsics setup is broken.
-  //
-  // For the last direction, we have intrinsics for virtual functions that will perform a check
-  // inline. If the precise type is known, however, the instruction will be sharpened to an
-  // InvokeStaticOrDirect.
-  InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
-  InvokeType invoke_type = invoke->GetInvokeType();
-
-  switch (intrinsic_type) {
-    case kStatic:
-      return (invoke_type == kStatic);
-
-    case kDirect:
-      if (invoke_type == kDirect) {
-        return true;
-      }
-      if (invoke_type == kVirtual) {
-        ArtMethod* art_method = invoke->GetResolvedMethod();
-        return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
-      }
-      return false;
-
-    case kVirtual:
-      // Call might be devirtualized.
-      return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
-
-    case kSuper:
-    case kInterface:
-    case kPolymorphic:
-    case kCustom:
-      return false;
-  }
-  LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
-  UNREACHABLE();
-}
-
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
-                                     ArtMethod* art_method,
-                                     /*out*/ bool* wrong_invoke_type) {
-  if (art_method == nullptr) {
-    art_method = invoke->GetResolvedMethod();
-  }
-  *wrong_invoke_type = false;
-  if (art_method == nullptr || !art_method->IsIntrinsic()) {
-    return false;
-  }
-
-  // TODO: b/65872996 The intent is that polymorphic signature methods should
-  // be compiler intrinsics. At present, they are only interpreter intrinsics.
-  if (art_method->IsPolymorphicSignature()) {
-    return false;
-  }
-
-  Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
-  if (CheckInvokeType(intrinsic, invoke) == false) {
-    *wrong_invoke_type = true;
-    return false;
-  }
-
-  invoke->SetIntrinsic(intrinsic,
-                       NeedsEnvironmentOrCache(intrinsic),
-                       GetSideEffects(intrinsic),
-                       GetExceptions(intrinsic));
-  return true;
-}
-
-bool IntrinsicsRecognizer::Run() {
-  bool didRecognize = false;
-  ScopedObjectAccess soa(Thread::Current());
-  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
-    for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
-         inst_it.Advance()) {
-      HInstruction* inst = inst_it.Current();
-      if (inst->IsInvoke()) {
-        bool wrong_invoke_type = false;
-        if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
-          didRecognize = true;
-          MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
-        } else if (wrong_invoke_type) {
-          LOG(WARNING)
-              << "Found an intrinsic with unexpected invoke type: "
-              << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
-              << inst->DebugName();
-        }
-      }
-    }
-  }
-  return didRecognize;
-}
-
 std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
   switch (intrinsic) {
     case Intrinsics::kNone:
@@ -250,7 +77,7 @@
                                                     const char* descriptor)
         REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> klass =
-      class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+      class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr);
   DCHECK(klass != nullptr);
   DCHECK(klass->IsInitialized());
   return klass;
@@ -340,14 +167,14 @@
     Thread* self = Thread::Current();
     ScopedObjectAccess soa(self);
     ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
-        self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+        self, kIntegerCacheDescriptor, /* class_loader= */ nullptr);
     DCHECK(cache_class != nullptr);
     if (UNLIKELY(!cache_class->IsInitialized())) {
       LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
       return;
     }
     ObjPtr<mirror::Class> integer_class =
-        class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+        class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr);
     DCHECK(integer_class != nullptr);
     if (UNLIKELY(!integer_class->IsInitialized())) {
       LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 7594d4a..ab68cce 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -24,7 +24,6 @@
 
 namespace art {
 
-class CompilerDriver;
 class DexFile;
 
 // Positive floating-point infinities.
@@ -34,28 +33,6 @@
 static constexpr uint32_t kNanFloat = 0x7fc00000U;
 static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
 
-// Recognize intrinsics from HInvoke nodes.
-class IntrinsicsRecognizer : public HOptimization {
- public:
-  IntrinsicsRecognizer(HGraph* graph,
-                       OptimizingCompilerStats* stats,
-                       const char* name = kIntrinsicsRecognizerPassName)
-      : HOptimization(graph, name, stats) {}
-
-  bool Run() override;
-
-  // Static helper that recognizes intrinsic call. Returns true on success.
-  // If it fails due to invoke type mismatch, wrong_invoke_type is set.
-  // Useful to recognize intrinsics on individual calls outside this full pass.
-  static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicsRecognizer);
-};
-
 class IntrinsicVisitor : public ValueObject {
  public:
   virtual ~IntrinsicVisitor() {}
@@ -264,11 +241,15 @@
 
 // Defines a list of unreached intrinsics: that is, method calls that are recognized as
 // an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
 #define UNREACHABLE_INTRINSIC(Arch, Name)                                \
 void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
-  LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
-             << " should have been converted to HIR";                    \
+  if (Runtime::Current()->IsAotCompiler() &&                             \
+      !codegen_->GetCompilerOptions().IsBaseline()) {                    \
+    LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()    \
+               << " should have been converted to HIR";                  \
+  }                                                                      \
 }                                                                        \
 void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) {    \
   LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index fcd2788..ec5d17a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -272,10 +272,10 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -286,10 +286,10 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -618,7 +618,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
-  GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler());
+  GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -626,7 +626,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
-  GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler());
+  GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -752,13 +752,13 @@
                                                    trg_loc,
                                                    base,
                                                    MemOperand(temp.X()),
-                                                   /* needs_null_check */ false,
+                                                   /* needs_null_check= */ false,
                                                    is_volatile);
   } else {
     // Other cases.
     MemOperand mem_op(base.X(), offset);
     if (is_volatile) {
-      codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true);
+      codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true);
     } else {
       codegen->Load(type, trg, mem_op);
     }
@@ -813,22 +813,22 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -896,7 +896,7 @@
     }
 
     if (is_volatile || is_ordered) {
-      codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
+      codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false);
     } else {
       codegen->Store(type, source, mem_op);
     }
@@ -911,64 +911,64 @@
 void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1646,7 +1646,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1662,7 +1662,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2464,8 +2464,8 @@
                                                           src.W(),
                                                           class_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           // Bail out if the source is not a non primitive array.
           // /* HeapReference<Class> */ temp1 = temp1->component_type_
           codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -2473,8 +2473,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           __ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
           // If heap poisoning is enabled, `temp1` has been unpoisoned
           // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2490,8 +2490,8 @@
                                                         dest.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
 
         if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
           // Bail out if the destination is not a non primitive array.
@@ -2507,8 +2507,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
           // If heap poisoning is enabled, `temp2` has been unpoisoned
           // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2526,8 +2526,8 @@
                                                         src.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
         __ Cmp(temp1, temp2);
 
@@ -2540,8 +2540,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           // /* HeapReference<Class> */ temp1 = temp1->super_class_
           // We do not need to emit a read barrier for the following
           // heap reference load, as `temp1` is only used in a
@@ -2624,16 +2624,16 @@
                                                         src.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                         temp2_loc,
                                                         temp1,
                                                         component_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
         // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2787,7 +2787,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -2820,7 +2820,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2828,7 +2828,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2924,6 +2924,251 @@
 
 void IntrinsicCodeGeneratorARM64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
 
+void IntrinsicLocationsBuilderARM64::VisitCRC32Update(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+                                                                LocationSummary::kNoCall,
+                                                                kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+// Lower the invoke of CRC32.update(int crc, int b).
+void IntrinsicCodeGeneratorARM64::VisitCRC32Update(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  MacroAssembler* masm = GetVIXLAssembler();
+
+  Register crc = InputRegisterAt(invoke, 0);
+  Register val = InputRegisterAt(invoke, 1);
+  Register out = OutputRegister(invoke);
+
+  // The general algorithm of the CRC32 calculation is:
+  //   crc = ~crc
+  //   result = crc32_for_byte(crc, b)
+  //   crc = ~result
+  // It is directly lowered to three instructions.
+
+  UseScratchRegisterScope temps(masm);
+  Register tmp = temps.AcquireSameSizeAs(out);
+
+  __ Mvn(tmp, crc);
+  __ Crc32b(tmp, tmp, val);
+  __ Mvn(out, tmp);
+}
+
+// Generate code using CRC32 instructions which calculates
+// a CRC32 value of a byte.
+//
+// Parameters:
+//   masm   - VIXL macro assembler
+//   crc    - a register holding an initial CRC value
+//   ptr    - a register holding a memory address of bytes
+//   length - a register holding a number of bytes to process
+//   out    - a register to put a result of calculation
+static void GenerateCodeForCalculationCRC32ValueOfBytes(MacroAssembler* masm,
+                                                        const Register& crc,
+                                                        const Register& ptr,
+                                                        const Register& length,
+                                                        const Register& out) {
+  // The algorithm of CRC32 of bytes is:
+  //   crc = ~crc
+  //   process a few first bytes to make the array 8-byte aligned
+  //   while array has 8 bytes do:
+  //     crc = crc32_of_8bytes(crc, 8_bytes(array))
+  //   if array has 4 bytes:
+  //     crc = crc32_of_4bytes(crc, 4_bytes(array))
+  //   if array has 2 bytes:
+  //     crc = crc32_of_2bytes(crc, 2_bytes(array))
+  //   if array has a byte:
+  //     crc = crc32_of_byte(crc, 1_byte(array))
+  //   crc = ~crc
+
+  vixl::aarch64::Label loop, done;
+  vixl::aarch64::Label process_4bytes, process_2bytes, process_1byte;
+  vixl::aarch64::Label aligned2, aligned4, aligned8;
+
+  // Use VIXL scratch registers as the VIXL macro assembler won't use them in
+  // instructions below.
+  UseScratchRegisterScope temps(masm);
+  Register len = temps.AcquireW();
+  Register array_elem = temps.AcquireW();
+
+  __ Mvn(out, crc);
+  __ Mov(len, length);
+
+  __ Tbz(ptr, 0, &aligned2);
+  __ Subs(len, len, 1);
+  __ B(&done, lo);
+  __ Ldrb(array_elem, MemOperand(ptr, 1, PostIndex));
+  __ Crc32b(out, out, array_elem);
+
+  __ Bind(&aligned2);
+  __ Tbz(ptr, 1, &aligned4);
+  __ Subs(len, len, 2);
+  __ B(&process_1byte, lo);
+  __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+  __ Crc32h(out, out, array_elem);
+
+  __ Bind(&aligned4);
+  __ Tbz(ptr, 2, &aligned8);
+  __ Subs(len, len, 4);
+  __ B(&process_2bytes, lo);
+  __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+  __ Crc32w(out, out, array_elem);
+
+  __ Bind(&aligned8);
+  __ Subs(len, len, 8);
+  // If len < 8 go to process data by 4 bytes, 2 bytes and a byte.
+  __ B(&process_4bytes, lo);
+
+  // The main loop processing data by 8 bytes.
+  __ Bind(&loop);
+  __ Ldr(array_elem.X(), MemOperand(ptr, 8, PostIndex));
+  __ Subs(len, len, 8);
+  __ Crc32x(out, out, array_elem.X());
+  // if len >= 8, process the next 8 bytes.
+  __ B(&loop, hs);
+
+  // Process the data which is less than 8 bytes.
+  // The code generated below works with values of len
+  // which come in the range [-8, 0].
+  // The first three bits are used to detect whether 4 bytes or 2 bytes or
+  // a byte can be processed.
+  // The checking order is from bit 2 to bit 0:
+  //  bit 2 is set: at least 4 bytes available
+  //  bit 1 is set: at least 2 bytes available
+  //  bit 0 is set: at least a byte available
+  __ Bind(&process_4bytes);
+  // Goto process_2bytes if less than four bytes available
+  __ Tbz(len, 2, &process_2bytes);
+  __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+  __ Crc32w(out, out, array_elem);
+
+  __ Bind(&process_2bytes);
+  // Goto process_1byte if less than two bytes available
+  __ Tbz(len, 1, &process_1byte);
+  __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+  __ Crc32h(out, out, array_elem);
+
+  __ Bind(&process_1byte);
+  // Goto done if no bytes available
+  __ Tbz(len, 0, &done);
+  __ Ldrb(array_elem, MemOperand(ptr));
+  __ Crc32b(out, out, array_elem);
+
+  __ Bind(&done);
+  __ Mvn(out, out);
+}
+
+// The threshold for sizes of arrays to use the library provided implementation
+// of CRC32.updateBytes instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke,
+                                       LocationSummary::kCallOnSlowPath,
+                                       kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  SlowPathCodeARM64* slow_path =
+    new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+  codegen_->AddSlowPath(slow_path);
+
+  Register length = WRegisterFrom(locations->InAt(3));
+  __ Cmp(length, kCRC32UpdateBytesThreshold);
+  __ B(slow_path->GetEntryLabel(), hi);
+
+  const uint32_t array_data_offset =
+      mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+  Register ptr = XRegisterFrom(locations->GetTemp(0));
+  Register array = XRegisterFrom(locations->InAt(1));
+  Location offset = locations->InAt(2);
+  if (offset.IsConstant()) {
+    int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+    __ Add(ptr, array, array_data_offset + offset_value);
+  } else {
+    __ Add(ptr, array, array_data_offset);
+    __ Add(ptr, ptr, XRegisterFrom(offset));
+  }
+
+  Register crc = WRegisterFrom(locations->InAt(0));
+  Register out = WRegisterFrom(locations->Out());
+
+  GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke,
+                                       LocationSummary::kNoCall,
+                                       kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateByteBuffer(int crc, long addr, int off, int len)
+//
+// There is no need to generate code checking if addr is 0.
+// The method updateByteBuffer is a private method of java.util.zip.CRC32.
+// This guarantees no calls outside of the CRC32 class.
+// An address of DirectBuffer is always passed to the call of updateByteBuffer.
+// It might be an implementation of an empty DirectBuffer which can use a zero
+// address but then its length must be zero. The current generated code
+// correctly works with the zero length.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register addr = XRegisterFrom(locations->InAt(1));
+  Register ptr = XRegisterFrom(locations->GetTemp(0));
+  __ Add(ptr, addr, XRegisterFrom(locations->InAt(2)));
+
+  Register crc = WRegisterFrom(locations->InAt(0));
+  Register length = WRegisterFrom(locations->InAt(3));
+  Register out = WRegisterFrom(locations->Out());
+  GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+}
+
 UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f0a4184..f0aa92e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -229,7 +229,7 @@
     assembler->MaybePoisonHeapReference(tmp);
     __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
     __ Cmp(src_curr_addr, src_stop_addr);
-    __ B(ne, &loop, /* far_target */ false);
+    __ B(ne, &loop, /* is_far_target= */ false);
     __ B(GetExitLabel());
   }
 
@@ -298,10 +298,10 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -312,10 +312,10 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -355,7 +355,7 @@
     vixl32::Label end;
     vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
     __ Clz(out, in_reg_hi);
-    __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
+    __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false);
     __ Clz(out, in_reg_lo);
     __ Add(out, out, 32);
     if (end.IsReferenced()) {
@@ -398,7 +398,7 @@
     vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
     __ Rbit(out, in_reg_lo);
     __ Clz(out, out);
-    __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
+    __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false);
     __ Rbit(out, in_reg_hi);
     __ Clz(out, out);
     __ Add(out, out, 32);
@@ -446,7 +446,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
   DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+  __ Vrintn(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
@@ -476,12 +476,12 @@
 
   // For positive, zero or NaN inputs, rounding is done.
   __ Cmp(out_reg, 0);
-  __ B(ge, final_label, /* far_target */ false);
+  __ B(ge, final_label, /* is_far_target= */ false);
 
   // Handle input < 0 cases.
   // If input is negative but not a tie, previous result (round to nearest) is valid.
   // If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
-  __ Vrinta(F32, F32, temp1, in_reg);
+  __ Vrinta(F32, temp1, in_reg);
   __ Vmov(temp2, 0.5);
   __ Vsub(F32, temp1, in_reg, temp1);
   __ Vcmp(F32, temp1, temp2);
@@ -642,7 +642,7 @@
           __ Add(RegisterFrom(temp), base, Operand(offset));
           MemOperand src(RegisterFrom(temp), 0);
           codegen->GenerateFieldLoadWithBakerReadBarrier(
-              invoke, trg_loc, base, src, /* needs_null_check */ false);
+              invoke, trg_loc, base, src, /* needs_null_check= */ false);
           if (is_volatile) {
             __ Dmb(vixl32::ISH);
           }
@@ -733,22 +733,22 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
@@ -778,39 +778,39 @@
 
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke);
 }
 
 static void GenUnsafePut(LocationSummary* locations,
@@ -844,7 +844,7 @@
       __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
       __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
       __ Cmp(temp_lo, 0);
-      __ B(ne, &loop_head, /* far_target */ false);
+      __ B(ne, &loop_head, /* is_far_target= */ false);
     } else {
       __ Strd(value_lo, value_hi, MemOperand(base, offset));
     }
@@ -875,64 +875,64 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1026,7 +1026,7 @@
     __ Strex(tmp, value, MemOperand(tmp_ptr));
     assembler->MaybeUnpoisonHeapReference(value);
     __ Cmp(tmp, 0);
-    __ B(ne, &loop_head, /* far_target */ false);
+    __ B(ne, &loop_head, /* is_far_target= */ false);
     __ B(GetExitLabel());
   }
 };
@@ -1092,7 +1092,8 @@
     assembler->MaybeUnpoisonHeapReference(tmp);
   }
   __ Subs(tmp, tmp, expected);
-  __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint);
+  static_cast<vixl32::MacroAssembler*>(assembler->GetVIXLAssembler())->
+      B(ne, failure, /* hint= */ (failure == loop_exit) ? kNear : kBranchWithoutHint);
   if (type == DataType::Type::kReference) {
     assembler->MaybePoisonHeapReference(value);
   }
@@ -1101,7 +1102,7 @@
     assembler->MaybeUnpoisonHeapReference(value);
   }
   __ Cmp(tmp, 0);
-  __ B(ne, &loop_head, /* far_target */ false);
+  __ B(ne, &loop_head, /* is_far_target= */ false);
 
   __ Bind(loop_exit);
 
@@ -1112,7 +1113,7 @@
   __ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
 
   if (type == DataType::Type::kReference) {
-    codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
+    codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128);
   }
 }
 
@@ -1307,23 +1308,23 @@
   __ Ldr(temp_reg, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
   __ Cmp(temp_reg, temp2);
-  __ B(ne, &find_char_diff, /* far_target */ false);
+  __ B(ne, &find_char_diff, /* is_far_target= */ false);
   __ Add(temp1, temp1, char_size * 2);
 
   __ Ldr(temp_reg, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
   __ Cmp(temp_reg, temp2);
-  __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
+  __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false);
   __ Add(temp1, temp1, char_size * 2);
   // With string compression, we have compared 8 bytes, otherwise 4 chars.
   __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
-  __ B(hi, &loop, /* far_target */ false);
+  __ B(hi, &loop, /* is_far_target= */ false);
   __ B(end);
 
   __ Bind(&find_char_diff_2nd_cmp);
   if (mirror::kUseStringCompression) {
     __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.
-    __ B(ls, end, /* far_target */ false);  // Was the second comparison fully beyond the end?
+    __ B(ls, end, /* is_far_target= */ false);  // Was the second comparison fully beyond the end?
   } else {
     // Without string compression, we can start treating temp0 as signed
     // and rely on the signed comparison below.
@@ -1351,7 +1352,7 @@
   // the remaining string data, so just return length diff (out).
   // The comparison is unsigned for string compression, otherwise signed.
   __ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
-  __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
+  __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false);
 
   // Extract the characters and calculate the difference.
   if (mirror::kUseStringCompression) {
@@ -1418,9 +1419,9 @@
     __ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
     __ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
     __ Cmp(temp_reg, temp3);
-    __ B(ne, &different_compression_diff, /* far_target */ false);
+    __ B(ne, &different_compression_diff, /* is_far_target= */ false);
     __ Subs(temp0, temp0, 2);
-    __ B(hi, &different_compression_loop, /* far_target */ false);
+    __ B(hi, &different_compression_loop, /* is_far_target= */ false);
     __ B(end);
 
     // Calculate the difference.
@@ -1516,12 +1517,12 @@
   StringEqualsOptimizations optimizations(invoke);
   if (!optimizations.GetArgumentNotNull()) {
     // Check if input is null, return false if it is.
-    __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
+    __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false);
   }
 
   // Reference equality check, return true if same reference.
   __ Cmp(str, arg);
-  __ B(eq, &return_true, /* far_target */ false);
+  __ B(eq, &return_true, /* is_far_target= */ false);
 
   if (!optimizations.GetArgumentIsString()) {
     // Instanceof check for the argument by comparing class fields.
@@ -1539,7 +1540,7 @@
     // Also, because we use the previously loaded class references only in the
     // following comparison, we don't need to unpoison them.
     __ Cmp(temp, out);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   }
 
   // Check if one of the inputs is a const string. Do not special-case both strings
@@ -1562,7 +1563,7 @@
     // Also compares the compression style, if differs return false.
     __ Ldr(temp, MemOperand(arg, count_offset));
     __ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   } else {
     // Load `count` fields of this and argument strings.
     __ Ldr(temp, MemOperand(str, count_offset));
@@ -1570,7 +1571,7 @@
     // Check if `count` fields are equal, return false if they're not.
     // Also compares the compression style, if differs return false.
     __ Cmp(temp, out);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   }
 
   // Assertions that must hold in order to compare strings 4 bytes at a time.
@@ -1593,9 +1594,9 @@
       __ Ldrd(temp, temp1, MemOperand(str, offset));
       __ Ldrd(temp2, out, MemOperand(arg, offset));
       __ Cmp(temp, temp2);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
       __ Cmp(temp1, out);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
       offset += 2u * sizeof(uint32_t);
       remaining_bytes -= 2u * sizeof(uint32_t);
     }
@@ -1603,13 +1604,13 @@
       __ Ldr(temp, MemOperand(str, offset));
       __ Ldr(out, MemOperand(arg, offset));
       __ Cmp(temp, out);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
     }
   } else {
     // Return true if both strings are empty. Even with string compression `count == 0` means empty.
     static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                   "Expecting 0=compressed, 1=uncompressed");
-    __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);
+    __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false);
 
     if (mirror::kUseStringCompression) {
       // For string compression, calculate the number of bytes to compare (not chars).
@@ -1635,10 +1636,10 @@
     __ Ldr(temp2, MemOperand(arg, temp1));
     __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
     __ Cmp(out, temp2);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
     // With string compression, we have compared 4 bytes, otherwise 2 chars.
     __ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
-    __ B(hi, &loop, /* far_target */ false);
+    __ B(hi, &loop, /* is_far_target= */ false);
   }
 
   // Return true and exit the function.
@@ -1719,7 +1720,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1735,7 +1736,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1957,7 +1958,7 @@
     } else {
       if (!optimizations.GetDestinationIsSource()) {
         __ Cmp(src, dest);
-        __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+        __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
       }
       __ Cmp(RegisterFrom(dest_pos), src_pos_constant);
       __ B(gt, intrinsic_slow_path->GetEntryLabel());
@@ -1965,7 +1966,7 @@
   } else {
     if (!optimizations.GetDestinationIsSource()) {
       __ Cmp(src, dest);
-      __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+      __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
     }
     if (dest_pos.IsConstant()) {
       int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
@@ -2025,11 +2026,11 @@
       if (!optimizations.GetSourceIsNonPrimitiveArray()) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
         // Bail out if the source is not a non primitive array.
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
         __ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp1` has been unpoisoned
         // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2041,7 +2042,7 @@
 
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false);
 
       if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
         // Bail out if the destination is not a non primitive array.
@@ -2053,7 +2054,7 @@
         // temporaries such a `temp1`.
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+            invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
         __ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
         // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2067,16 +2068,16 @@
       // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false);
       // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
       __ Cmp(temp1, temp2);
 
       if (optimizations.GetDestinationIsTypedObjectArray()) {
         vixl32::Label do_copy;
-        __ B(eq, &do_copy, /* far_target */ false);
+        __ B(eq, &do_copy, /* is_far_target= */ false);
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
         // /* HeapReference<Class> */ temp1 = temp1->super_class_
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
@@ -2133,7 +2134,7 @@
 
       if (optimizations.GetDestinationIsTypedObjectArray()) {
         vixl32::Label do_copy;
-        __ B(eq, &do_copy, /* far_target */ false);
+        __ B(eq, &do_copy, /* is_far_target= */ false);
         if (!did_unpoison) {
           assembler->MaybeUnpoisonHeapReference(temp1);
         }
@@ -2155,10 +2156,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ temp3 = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
       __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
       // If heap poisoning is enabled, `temp3` has been unpoisoned
       // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2186,7 +2187,7 @@
 
     if (length.IsRegister()) {
       // Don't enter the copy loop if the length is null.
-      __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+      __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
     }
 
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2263,7 +2264,7 @@
         __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
       }
       __ Cmp(temp1, temp3);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
 
       __ Bind(read_barrier_slow_path->GetExitLabel());
     } else {
@@ -2285,13 +2286,13 @@
         __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
       }
       __ Cmp(temp1, temp3);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
     }
     __ Bind(&done);
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -2821,7 +2822,7 @@
 
   __ Subs(num_chr, srcEnd, srcBegin);
   // Early out for valid zero-length retrievals.
-  __ B(eq, final_label, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
 
   // src range to copy.
   __ Add(src_ptr, srcObj, value_offset);
@@ -2837,7 +2838,7 @@
     __ Ldr(temp, MemOperand(srcObj, count_offset));
     __ Tst(temp, 1);
     temps.Release(temp);
-    __ B(eq, &compressed_string_preloop, /* far_target */ false);
+    __ B(eq, &compressed_string_preloop, /* is_far_target= */ false);
   }
   __ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
 
@@ -2847,7 +2848,7 @@
   temp = temps.Acquire();
   // Save repairing the value of num_chr on the < 4 character path.
   __ Subs(temp, num_chr, 4);
-  __ B(lt, &remainder, /* far_target */ false);
+  __ B(lt, &remainder, /* is_far_target= */ false);
 
   // Keep the result of the earlier subs, we are going to fetch at least 4 characters.
   __ Mov(num_chr, temp);
@@ -2862,10 +2863,10 @@
   __ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
   __ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
   temps.Release(temp);
-  __ B(ge, &loop, /* far_target */ false);
+  __ B(ge, &loop, /* is_far_target= */ false);
 
   __ Adds(num_chr, num_chr, 4);
-  __ B(eq, final_label, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
 
   // Main loop for < 4 character case and remainder handling. Loads and stores one
   // 16-bit Java character at a time.
@@ -2875,7 +2876,7 @@
   __ Subs(num_chr, num_chr, 1);
   __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
   temps.Release(temp);
-  __ B(gt, &remainder, /* far_target */ false);
+  __ B(gt, &remainder, /* is_far_target= */ false);
 
   if (mirror::kUseStringCompression) {
     __ B(final_label);
@@ -2891,7 +2892,7 @@
     __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
     temps.Release(temp);
     __ Subs(num_chr, num_chr, 1);
-    __ B(gt, &compressed_string_loop, /* far_target */ false);
+    __ B(gt, &compressed_string_loop, /* is_far_target= */ false);
   }
 
   if (done.IsReferenced()) {
@@ -2952,7 +2953,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
-  __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+  __ Vrintp(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
@@ -2964,7 +2965,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
-  __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+  __ Vrintm(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
@@ -3011,7 +3012,7 @@
     __ Add(out, in, -info.low);
     __ Cmp(out, info.length);
     vixl32::Label allocate, done;
-    __ B(hs, &allocate, /* is_far_target */ false);
+    __ B(hs, &allocate, /* is_far_target= */ false);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
     codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
     codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
@@ -3044,7 +3045,7 @@
   vixl32::Register temp = temps.Acquire();
   vixl32::Label done;
   vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
-  __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+  __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
   __ Dmb(vixl32::ISH);
   __ Mov(temp, 0);
   assembler->StoreToOffset(kStoreWord, temp, tr, offset);
@@ -3066,6 +3067,9 @@
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)     // High register pressure.
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 2ca12b6..3da0e57 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -185,7 +185,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // int java.lang.Float.floatToRawIntBits(float)
@@ -194,7 +194,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -226,7 +226,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // float java.lang.Float.intBitsToFloat(int)
@@ -235,7 +235,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator,
@@ -411,7 +411,7 @@
              DataType::Type::kInt32,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -425,7 +425,7 @@
              DataType::Type::kInt64,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -439,7 +439,7 @@
              DataType::Type::kInt16,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -479,7 +479,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
 }
 
 // int java.lang.Long.numberOfLeadingZeros(long i)
@@ -488,7 +488,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
 }
 
 static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -566,7 +566,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
 }
 
 // int java.lang.Long.numberOfTrailingZeros(long i)
@@ -575,7 +575,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
 }
 
 // int java.lang.Integer.reverse(int)
@@ -588,7 +588,7 @@
              DataType::Type::kInt32,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ true,
+             /* reverseBits= */ true,
              GetAssembler());
 }
 
@@ -602,7 +602,7 @@
              DataType::Type::kInt64,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ true,
+             /* reverseBits= */ true,
              GetAssembler());
 }
 
@@ -1055,11 +1055,11 @@
           codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                              trg_loc,
                                                              base,
-                                                             /* offset */ 0U,
-                                                             /* index */ offset_loc,
+                                                             /* offset= */ 0U,
+                                                             /* index= */ offset_loc,
                                                              TIMES_1,
                                                              temp,
-                                                             /* needs_null_check */ false);
+                                                             /* needs_null_check= */ false);
           if (is_volatile) {
             __ Sync(0);
           }
@@ -1077,8 +1077,8 @@
                                            trg_loc,
                                            trg_loc,
                                            base_loc,
-                                           /* offset */ 0U,
-                                           /* index */ offset_loc);
+                                           /* offset= */ 0U,
+                                           /* index= */ offset_loc);
         }
       } else {
         if (is_R6) {
@@ -1107,7 +1107,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -1116,7 +1116,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
 }
 
 // long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -1125,7 +1125,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -1134,7 +1134,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -1143,7 +1143,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
 }
 
 static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1225,8 +1225,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1239,8 +1239,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1253,8 +1253,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1267,8 +1267,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1281,8 +1281,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1295,8 +1295,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1309,8 +1309,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1323,8 +1323,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1388,12 +1388,12 @@
           invoke,
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
-          /* offset */ 0u,
-          /* index */ offset_loc,
+          /* offset= */ 0u,
+          /* index= */ offset_loc,
           ScaleFactor::TIMES_1,
           temp,
-          /* needs_null_check */ false,
-          /* always_update_field */ true);
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true);
     }
   }
 
@@ -1714,7 +1714,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
+  GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1735,7 +1735,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
+  GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2704,6 +2704,10 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
 
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
+
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
 UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index cbe3b42..3e68765 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -169,7 +169,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // int java.lang.Float.floatToRawIntBits(float)
@@ -178,7 +178,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -205,7 +205,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // float java.lang.Float.intBitsToFloat(int)
@@ -214,7 +214,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -295,7 +295,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // int java.lang.Long.numberOfLeadingZeros(long i)
@@ -304,7 +304,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -332,7 +332,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // int java.lang.Long.numberOfTrailingZeros(long i)
@@ -341,7 +341,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 static void GenReverse(LocationSummary* locations,
@@ -911,11 +911,11 @@
           codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                              trg_loc,
                                                              base,
-                                                             /* offset */ 0U,
-                                                             /* index */ offset_loc,
+                                                             /* offset= */ 0U,
+                                                             /* index= */ offset_loc,
                                                              TIMES_1,
                                                              temp,
-                                                             /* needs_null_check */ false);
+                                                             /* needs_null_check= */ false);
           if (is_volatile) {
             __ Sync(0);
           }
@@ -928,8 +928,8 @@
                                            trg_loc,
                                            trg_loc,
                                            base_loc,
-                                           /* offset */ 0U,
-                                           /* index */ offset_loc);
+                                           /* offset= */ 0U,
+                                           /* index= */ offset_loc);
         }
       } else {
         __ Lwu(trg, TMP, 0);
@@ -952,7 +952,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 
 // int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -961,7 +961,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 
 // long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -970,7 +970,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 
 // long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
@@ -979,7 +979,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 // Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -988,7 +988,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 
 // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -997,7 +997,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1067,8 +1067,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1080,8 +1080,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1093,8 +1093,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1106,8 +1106,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1119,8 +1119,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1132,8 +1132,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1145,8 +1145,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1158,8 +1158,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1171,8 +1171,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1234,12 +1234,12 @@
           invoke,
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
-          /* offset */ 0u,
-          /* index */ offset_loc,
+          /* offset= */ 0u,
+          /* index= */ offset_loc,
           ScaleFactor::TIMES_1,
           temp,
-          /* needs_null_check */ false,
-          /* always_update_field */ true);
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true);
     }
   }
 
@@ -1556,7 +1556,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1574,7 +1574,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1675,7 +1675,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // boolean java.lang.Double.isInfinite(double)
@@ -1684,7 +1684,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
@@ -2354,6 +2354,9 @@
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3b23798..de697f0 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -223,31 +223,31 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
+  CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
+  CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
+  CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
+  CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -545,6 +545,96 @@
   __ cfi().AdjustCFAOffset(-16);
 }
 
+static void CreateLowestOneBitLocations(ArenaAllocator* allocator, bool is_long, HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  if (is_long) {
+    locations->SetInAt(0, Location::RequiresRegister());
+  } else {
+    locations->SetInAt(0, Location::Any());
+  }
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+static void GenLowestOneBit(X86Assembler* assembler,
+                            CodeGeneratorX86* codegen,
+                            bool is_long,
+                            HInvoke* invoke) {
+  LocationSummary* locations = invoke->GetLocations();
+  Location src = locations->InAt(0);
+  Location out_loc = locations->Out();
+
+  if (invoke->InputAt(0)->IsConstant()) {
+    // Evaluate this at compile time.
+    int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant());
+    if (value == 0) {
+      if (is_long) {
+        __ xorl(out_loc.AsRegisterPairLow<Register>(), out_loc.AsRegisterPairLow<Register>());
+        __ xorl(out_loc.AsRegisterPairHigh<Register>(), out_loc.AsRegisterPairHigh<Register>());
+      } else {
+        __ xorl(out_loc.AsRegister<Register>(), out_loc.AsRegister<Register>());
+      }
+      return;
+    }
+    // Nonzero value.
+    value = is_long ? CTZ(static_cast<uint64_t>(value))
+                    : CTZ(static_cast<uint32_t>(value));
+    if (is_long) {
+      if (value >= 32) {
+        int shift = value - 32;
+        codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 0);
+        codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 1 << shift);
+      } else {
+        codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 1 << value);
+        codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 0);
+      }
+    } else {
+      codegen->Load32BitValue(out_loc.AsRegister<Register>(), 1 << value);
+    }
+    return;
+  }
+  // Handle non constant case
+  if (is_long) {
+    DCHECK(src.IsRegisterPair());
+    Register src_lo = src.AsRegisterPairLow<Register>();
+    Register src_hi = src.AsRegisterPairHigh<Register>();
+
+    Register out_lo = out_loc.AsRegisterPairLow<Register>();
+    Register out_hi = out_loc.AsRegisterPairHigh<Register>();
+
+    __ movl(out_lo, src_lo);
+    __ movl(out_hi, src_hi);
+
+    __ negl(out_lo);
+    __ adcl(out_hi, Immediate(0));
+    __ negl(out_hi);
+
+    __ andl(out_lo, src_lo);
+    __ andl(out_hi, src_hi);
+  } else {
+    if (codegen->GetInstructionSetFeatures().HasAVX2() && src.IsRegister()) {
+      Register out = out_loc.AsRegister<Register>();
+      __ blsi(out, src.AsRegister<Register>());
+    } else {
+      Register out = out_loc.AsRegister<Register>();
+      // Compute out = src & -src to isolate the lowest set bit.
+      if (src.IsRegister()) {
+        __ movl(out, src.AsRegister<Register>());
+      } else {
+        DCHECK(src.IsStackSlot());
+        __ movl(out, Address(ESP, src.GetStackIndex()));
+      }
+      __ negl(out);
+
+      if (src.IsRegister()) {
+        __ andl(out, src.AsRegister<Register>());
+      } else {
+        __ andl(out, Address(ESP, src.GetStackIndex()));
+      }
+    }
+  }
+}
+
 void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
   CreateFPToFPCallLocations(allocator_, invoke);
 }
@@ -657,6 +747,21 @@
   GenFPToFPCall(invoke, codegen_, kQuickTanh);
 }
 
+void IntrinsicLocationsBuilderX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+  CreateLowestOneBitLocations(allocator_, /*is_long=*/ false, invoke);
+}
+void IntrinsicCodeGeneratorX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+  GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ false, invoke);
+}
+
+void IntrinsicLocationsBuilderX86::VisitLongLowestOneBit(HInvoke* invoke) {
+  CreateLowestOneBitLocations(allocator_, /*is_long=*/ true, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitLongLowestOneBit(HInvoke* invoke) {
+  GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ true, invoke);
+}
+
 static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
@@ -1220,19 +1325,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1594,7 +1699,7 @@
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
-              invoke, output_loc, base, src, /* needs_null_check */ false);
+              invoke, output_loc, base, src, /* needs_null_check= */ false);
         } else {
           __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
           codegen->GenerateReadBarrierSlow(
@@ -1665,45 +1770,45 @@
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+      allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true);
 }
 
 
 void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 
@@ -1730,39 +1835,39 @@
 
 void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true);
 }
 
 // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -1814,34 +1919,34 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -1938,8 +2043,8 @@
           temp1_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
           field_addr,
-          /* needs_null_check */ false,
-          /* always_update_field */ true,
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true,
           &temp2);
     }
 
@@ -2170,19 +2275,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
-  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
+  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
-  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
+  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2274,19 +2379,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
+  CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
+  CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2365,19 +2470,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
+  CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
+  CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
@@ -2585,11 +2690,11 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
         // Bail out if the source is not a non primitive array.
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(temp1, temp1);
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2622,7 +2727,7 @@
 
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
 
       if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
         // Bail out if the destination is not a non primitive array.
@@ -2634,7 +2739,7 @@
         // temporaries such a `temp1`.
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(temp2, temp2);
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
@@ -2647,7 +2752,7 @@
       // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
       // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
       __ cmpl(temp1, temp2);
 
@@ -2656,7 +2761,7 @@
         __ j(kEqual, &do_copy);
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
         // comparison with null below, and this reference is not
@@ -2710,10 +2815,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ temp1 = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
       __ testl(temp1, temp1);
       __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
       // If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2846,7 +2951,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -2973,8 +3078,9 @@
 UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0469b02..e79c0c9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -162,10 +162,10 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -176,10 +176,10 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -430,12 +430,12 @@
   // direct x86 instruction, since NaN should map to 0 and large positive
   // values need to be clipped to the extreme value.
   codegen_->Load64BitValue(out, kPrimLongMax);
-  __ cvtsi2sd(t2, out, /* is64bit */ true);
+  __ cvtsi2sd(t2, out, /* is64bit= */ true);
   __ comisd(t1, t2);
   __ j(kAboveEqual, &done);  // clipped to max (already in out), does not jump on unordered
   __ movl(out, Immediate(0));  // does not change flags, implicit zero extension to 64-bit
   __ j(kUnordered, &done);  // NaN mapped to 0 (just moved in out)
-  __ cvttsd2si(out, t1, /* is64bit */ true);
+  __ cvttsd2si(out, t1, /* is64bit= */ true);
   __ Bind(&done);
 }
 
@@ -979,7 +979,7 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
       // Register `temp1` is not trashed by the read barrier emitted
       // by GenerateFieldLoadWithBakerReadBarrier below, as that
       // method produces a call to a ReadBarrierMarkRegX entry point,
@@ -987,7 +987,7 @@
       // temporaries such a `temp1`.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
       // If heap poisoning is enabled, `temp1` and `temp2` have been
       // unpoisoned by the the previous calls to
       // GenerateFieldLoadWithBakerReadBarrier.
@@ -1011,7 +1011,7 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ TMP = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(CpuRegister(TMP), CpuRegister(TMP));
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1034,7 +1034,7 @@
         // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
         // /* HeapReference<Class> */ TMP = temp2->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false);
+            invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false);
         __ testl(CpuRegister(TMP), CpuRegister(TMP));
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1058,7 +1058,7 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
         // comparison with null below, and this reference is not
@@ -1086,10 +1086,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ TMP = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+          invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
       __ testl(CpuRegister(TMP), CpuRegister(TMP));
       __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
     } else {
@@ -1198,7 +1198,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -1452,7 +1452,7 @@
     // Ensure we have a start index >= 0;
     __ xorl(counter, counter);
     __ cmpl(start_index, Immediate(0));
-    __ cmov(kGreater, counter, start_index, /* is64bit */ false);  // 32-bit copy is enough.
+    __ cmov(kGreater, counter, start_index, /* is64bit= */ false);  // 32-bit copy is enough.
 
     if (mirror::kUseStringCompression) {
       NearLabel modify_counter, offset_uncompressed_label;
@@ -1514,19 +1514,19 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1840,7 +1840,7 @@
 void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
   CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
   GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
-                                                    /* no_rip */ true));
+                                                    /* no_rip= */ true));
 }
 
 static void GenUnsafeGet(HInvoke* invoke,
@@ -1866,7 +1866,7 @@
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
-              invoke, output_loc, base, src, /* needs_null_check */ false);
+              invoke, output_loc, base, src, /* needs_null_check= */ false);
         } else {
           __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
           codegen->GenerateReadBarrierSlow(
@@ -1930,22 +1930,22 @@
 
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 
@@ -2028,34 +2028,34 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2140,8 +2140,8 @@
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
           field_addr,
-          /* needs_null_check */ false,
-          /* always_update_field */ true,
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true,
           &temp1,
           &temp2);
     }
@@ -2369,7 +2369,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
@@ -2377,7 +2377,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
@@ -2421,93 +2421,98 @@
   }
 
   // Handle the non-constant cases.
-  CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
-  if (is_high) {
-    // Use architectural support: basically 1 << bsr.
-    if (src.IsRegister()) {
-      if (is_long) {
-        __ bsrq(tmp, src.AsRegister<CpuRegister>());
+  if (!is_high && codegen->GetInstructionSetFeatures().HasAVX2() &&
+      src.IsRegister()) {
+    __ blsi(out, src.AsRegister<CpuRegister>());
+  } else {
+    CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+    if (is_high) {
+      // Use architectural support: basically 1 << bsr.
+      if (src.IsRegister()) {
+        if (is_long) {
+          __ bsrq(tmp, src.AsRegister<CpuRegister>());
+        } else {
+          __ bsrl(tmp, src.AsRegister<CpuRegister>());
+        }
+      } else if (is_long) {
+        DCHECK(src.IsDoubleStackSlot());
+        __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
       } else {
-        __ bsrl(tmp, src.AsRegister<CpuRegister>());
+        DCHECK(src.IsStackSlot());
+        __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
       }
-    } else if (is_long) {
-      DCHECK(src.IsDoubleStackSlot());
-      __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-    } else {
-      DCHECK(src.IsStackSlot());
-      __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-    }
-    // BSR sets ZF if the input was zero.
-    NearLabel is_zero, done;
-    __ j(kEqual, &is_zero);
-    __ movl(out, Immediate(1));  // Clears upper bits too.
-    if (is_long) {
-      __ shlq(out, tmp);
-    } else {
-      __ shll(out, tmp);
-    }
-    __ jmp(&done);
-    __ Bind(&is_zero);
-    __ xorl(out, out);  // Clears upper bits too.
-    __ Bind(&done);
-  } else  {
-    // Copy input into temporary.
-    if (src.IsRegister()) {
+      // BSR sets ZF if the input was zero.
+      NearLabel is_zero, done;
+      __ j(kEqual, &is_zero);
+      __ movl(out, Immediate(1));  // Clears upper bits too.
       if (is_long) {
-        __ movq(tmp, src.AsRegister<CpuRegister>());
+        __ shlq(out, tmp);
       } else {
-        __ movl(tmp, src.AsRegister<CpuRegister>());
+        __ shll(out, tmp);
       }
-    } else if (is_long) {
-      DCHECK(src.IsDoubleStackSlot());
-      __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-    } else {
-      DCHECK(src.IsStackSlot());
-      __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-    }
-    // Do the bit twiddling: basically tmp & -tmp;
-    if (is_long) {
-      __ movq(out, tmp);
-      __ negq(tmp);
-      __ andq(out, tmp);
-    } else {
-      __ movl(out, tmp);
-      __ negl(tmp);
-      __ andl(out, tmp);
+      __ jmp(&done);
+      __ Bind(&is_zero);
+      __ xorl(out, out);  // Clears upper bits too.
+      __ Bind(&done);
+    } else {
+      // Copy input into temporary.
+      if (src.IsRegister()) {
+        if (is_long) {
+          __ movq(tmp, src.AsRegister<CpuRegister>());
+        } else {
+          __ movl(tmp, src.AsRegister<CpuRegister>());
+        }
+      } else if (is_long) {
+        DCHECK(src.IsDoubleStackSlot());
+        __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+      } else {
+        DCHECK(src.IsStackSlot());
+        __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+      }
+      // Do the bit twiddling: basically tmp & -tmp;
+      if (is_long) {
+        __ movq(out, tmp);
+        __ negq(tmp);
+        __ andq(out, tmp);
+      } else {
+        __ movl(out, tmp);
+        __ negl(tmp);
+        __ andl(out, tmp);
+      }
     }
   }
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true);
 }
 
 static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2572,7 +2577,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2580,7 +2585,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2640,7 +2645,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2648,7 +2653,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2719,7 +2724,7 @@
   X86_64Assembler* assembler = GetAssembler();
   CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
   Address address = Address::Absolute
-      (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+      (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true);
   NearLabel done;
   __ gs()->movl(out, address);
   __ testl(out, out);
@@ -2740,6 +2745,9 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 7f71745..b33d0f4 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -692,7 +692,7 @@
     VisitSetLocation(instruction, idx, instruction->InputAt(2));
   }
 
-  void VisitDeoptimize(HDeoptimize* instruction) {
+  void VisitDeoptimize(HDeoptimize* instruction) override {
     const ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     for (HInstruction* heap_value : heap_values) {
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7d66155..12b180d 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -351,7 +351,10 @@
 
 // Translates vector operation to reduction kind.
 static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
-  if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
+  if (reduction->IsVecAdd() ||
+      reduction->IsVecSub() ||
+      reduction->IsVecSADAccumulate() ||
+      reduction->IsVecDotProd()) {
     return HVecReduce::kSum;
   }
   LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
@@ -431,6 +434,23 @@
   }
 }
 
+// Returns the narrower of the types of instructions a and b (looking through type conversions).
+static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
+  DataType::Type type = a->GetType();
+  if (DataType::Size(b->GetType()) < DataType::Size(type)) {
+    type = b->GetType();
+  }
+  if (a->IsTypeConversion() &&
+      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
+    type = a->InputAt(0)->GetType();
+  }
+  if (b->IsTypeConversion() &&
+      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
+    type = b->InputAt(0)->GetType();
+  }
+  return type;
+}
+
 //
 // Public methods.
 //
@@ -1289,6 +1309,7 @@
     DataType::Type type = instruction->GetType();
     // Recognize SAD idiom or direct reduction.
     if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
+        VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
         (TrySetVectorType(type, &restrictions) &&
          VectorizeUse(node, instruction, generate_code, type, restrictions))) {
       if (generate_code) {
@@ -1531,11 +1552,11 @@
         case DataType::Type::kBool:
         case DataType::Type::kUint8:
         case DataType::Type::kInt8:
-          *restrictions |= kNoDiv | kNoReduction;
+          *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
           return TrySetVectorLength(8);
         case DataType::Type::kUint16:
         case DataType::Type::kInt16:
-          *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
+          *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
           return TrySetVectorLength(4);
         case DataType::Type::kInt32:
           *restrictions |= kNoDiv | kNoWideSAD;
@@ -1580,12 +1601,23 @@
           case DataType::Type::kBool:
           case DataType::Type::kUint8:
           case DataType::Type::kInt8:
-            *restrictions |=
-                kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+            *restrictions |= kNoMul |
+                             kNoDiv |
+                             kNoShift |
+                             kNoAbs |
+                             kNoSignedHAdd |
+                             kNoUnroundedHAdd |
+                             kNoSAD |
+                             kNoDotProd;
             return TrySetVectorLength(16);
           case DataType::Type::kUint16:
           case DataType::Type::kInt16:
-            *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+            *restrictions |= kNoDiv |
+                             kNoAbs |
+                             kNoSignedHAdd |
+                             kNoUnroundedHAdd |
+                             kNoSAD |
+                             kNoDotProd;
             return TrySetVectorLength(8);
           case DataType::Type::kInt32:
             *restrictions |= kNoDiv | kNoSAD;
@@ -1610,11 +1642,11 @@
           case DataType::Type::kBool:
           case DataType::Type::kUint8:
           case DataType::Type::kInt8:
-            *restrictions |= kNoDiv;
+            *restrictions |= kNoDiv | kNoDotProd;
             return TrySetVectorLength(16);
           case DataType::Type::kUint16:
           case DataType::Type::kInt16:
-            *restrictions |= kNoDiv | kNoStringCharAt;
+            *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
             return TrySetVectorLength(8);
           case DataType::Type::kInt32:
             *restrictions |= kNoDiv;
@@ -1639,11 +1671,11 @@
           case DataType::Type::kBool:
           case DataType::Type::kUint8:
           case DataType::Type::kInt8:
-            *restrictions |= kNoDiv;
+            *restrictions |= kNoDiv | kNoDotProd;
             return TrySetVectorLength(16);
           case DataType::Type::kUint16:
           case DataType::Type::kInt16:
-            *restrictions |= kNoDiv | kNoStringCharAt;
+            *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
             return TrySetVectorLength(8);
           case DataType::Type::kInt32:
             *restrictions |= kNoDiv;
@@ -2071,18 +2103,7 @@
   HInstruction* r = a;
   HInstruction* s = b;
   bool is_unsigned = false;
-  DataType::Type sub_type = a->GetType();
-  if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
-    sub_type = b->GetType();
-  }
-  if (a->IsTypeConversion() &&
-      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
-    sub_type = a->InputAt(0)->GetType();
-  }
-  if (b->IsTypeConversion() &&
-      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
-    sub_type = b->InputAt(0)->GetType();
-  }
+  DataType::Type sub_type = GetNarrowerType(a, b);
   if (reduction_type != sub_type &&
       (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
     return false;
@@ -2123,6 +2144,75 @@
   return false;
 }
 
+// Method recognizes the following dot product idiom:
+//   q += a * b for operands a, b whose type is narrower than the reduction one.
+// Provided that the operands have the same type or are promoted to a wider form.
+// Since this may involve a vector length change, the idiom is handled by going directly
+// to a dot product node (rather than relying on combining finer-grained nodes later).
+bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
+                                              HInstruction* instruction,
+                                              bool generate_code,
+                                              DataType::Type reduction_type,
+                                              uint64_t restrictions) {
+  if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+    return false;
+  }
+
+  HInstruction* q = instruction->InputAt(0);
+  HInstruction* v = instruction->InputAt(1);
+  if (!v->IsMul() || v->GetType() != reduction_type) {
+    return false;
+  }
+
+  HInstruction* a = v->InputAt(0);
+  HInstruction* b = v->InputAt(1);
+  HInstruction* r = a;
+  HInstruction* s = b;
+  DataType::Type op_type = GetNarrowerType(a, b);
+  bool is_unsigned = false;
+
+  if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
+    return false;
+  }
+  op_type = HVecOperation::ToProperType(op_type, is_unsigned);
+
+  if (!TrySetVectorType(op_type, &restrictions) ||
+      HasVectorRestrictions(restrictions, kNoDotProd)) {
+    return false;
+  }
+
+  DCHECK(r != nullptr && s != nullptr);
+  // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
+  // idiomatic operation. Sequential code uses the original scalar expressions.
+  if (generate_code && vector_mode_ != kVector) {  // de-idiom
+    r = a;
+    s = b;
+  }
+  if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
+      VectorizeUse(node, r, generate_code, op_type, restrictions) &&
+      VectorizeUse(node, s, generate_code, op_type, restrictions)) {
+    if (generate_code) {
+      if (vector_mode_ == kVector) {
+        vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
+            global_allocator_,
+            vector_map_->Get(q),
+            vector_map_->Get(r),
+            vector_map_->Get(s),
+            reduction_type,
+            is_unsigned,
+            GetOtherVL(reduction_type, op_type, vector_length_),
+            kNoDexPc));
+        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
+      } else {
+        GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
+        GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
 //
 // Vectorization heuristics.
 //
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 2b202fd..1a842c4 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -82,6 +82,7 @@
     kNoReduction     = 1 << 9,   // no reduction
     kNoSAD           = 1 << 10,  // no sum of absolute differences (SAD)
     kNoWideSAD       = 1 << 11,  // no sum of absolute differences (SAD) with operand widening
+    kNoDotProd       = 1 << 12,  // no dot product
   };
 
   /*
@@ -217,6 +218,11 @@
                          bool generate_code,
                          DataType::Type type,
                          uint64_t restrictions);
+  bool VectorizeDotProdIdiom(LoopNode* node,
+                             HInstruction* instruction,
+                             bool generate_code,
+                             DataType::Type type,
+                             uint64_t restrictions);
 
   // Vectorization heuristics.
   Alignment ComputeAlignment(HInstruction* offset,
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c7cc661..310d98b 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,7 @@
       : graph_(CreateGraph()),
         iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
         loop_opt_(new (GetAllocator()) HLoopOptimization(
-            graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
+            graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) {
     BuildGraph();
   }
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 79a7e2c..f7c16d1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
 #include "art_method-inl.h"
 #include "base/bit_utils.h"
 #include "base/bit_vector-inl.h"
+#include "base/logging.h"
 #include "base/stl_util.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
@@ -43,7 +44,7 @@
   // Create the inexact Object reference type and store it in the HGraph.
   inexact_object_rti_ = ReferenceTypeInfo::Create(
       handles->NewHandle(GetClassRoot<mirror::Object>()),
-      /* is_exact */ false);
+      /* is_exact= */ false);
 }
 
 void HGraph::AddBlock(HBasicBlock* block) {
@@ -59,7 +60,7 @@
   ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
   ArenaBitVector visiting(
-      &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+      &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
   visiting.ClearAllBits();
   // Number of successors visited from a given node, indexed by block id.
   ScopedArenaVector<size_t> successors_visited(blocks_.size(),
@@ -688,7 +689,7 @@
 }
 
 const char* HGraph::GetMethodName() const {
-  const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+  const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
   return dex_file_.GetMethodName(method_id);
 }
 
@@ -825,7 +826,7 @@
     ScopedArenaAllocator allocator(graph->GetArenaStack());
     ArenaBitVector visited(&allocator,
                            graph->GetBlocks().size(),
-                           /* expandable */ false,
+                           /* expandable= */ false,
                            kArenaAllocGraphBuilder);
     visited.ClearAllBits();
     // Stop marking blocks at the loop header.
@@ -1230,7 +1231,7 @@
     }
   }
   LOG(FATAL) << "Did not find an order between two instructions of the same block.";
-  return true;
+  UNREACHABLE();
 }
 
 bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
@@ -1253,7 +1254,7 @@
       } else {
         // There is no order among phis.
         LOG(FATAL) << "There is no dominance between phis of a same block.";
-        return false;
+        UNREACHABLE();
       }
     } else {
       // `this` is not a phi.
@@ -2526,7 +2527,7 @@
         current->SetGraph(outer_graph);
         outer_graph->AddBlock(current);
         outer_graph->reverse_post_order_[++index_of_at] = current;
-        UpdateLoopAndTryInformationOfNewBlock(current, at,  /* replace_if_back_edge */ false);
+        UpdateLoopAndTryInformationOfNewBlock(current, at,  /* replace_if_back_edge= */ false);
       }
     }
 
@@ -2536,7 +2537,7 @@
     outer_graph->reverse_post_order_[++index_of_at] = to;
     // Only `to` can become a back edge, as the inlined blocks
     // are predecessors of `to`.
-    UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+    UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
 
     // Update all predecessors of the exit block (now the `to` block)
     // to not `HReturn` but `HGoto` instead. Special case throwing blocks
@@ -2710,13 +2711,13 @@
   DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
          !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
   UpdateLoopAndTryInformationOfNewBlock(
-      if_block, old_pre_header, /* replace_if_back_edge */ false);
+      if_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      true_block, old_pre_header, /* replace_if_back_edge */ false);
+      true_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      false_block, old_pre_header, /* replace_if_back_edge */ false);
+      false_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+      new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
 }
 
 HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
@@ -3180,4 +3181,77 @@
   }
 }
 
+// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, InvokeType, _, SideEffects, Exceptions, ...) \
+  static_assert( \
+    static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+    "Intrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+  INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
+
+// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCacheIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kNeedsEnvironmentOrCache;  // Nonsensical for intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return NeedsEnvOrCache;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kNeedsEnvironmentOrCache;
+}
+
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffectsIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return SideEffects;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptionsIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return Exceptions;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kCanThrow;
+}
+
+void HInvoke::SetResolvedMethod(ArtMethod* method) {
+  // TODO: b/65872996 The intent is that polymorphic signature methods should
+  // be compiler intrinsics. At present, they are only interpreter intrinsics.
+  if (method != nullptr &&
+      method->IsIntrinsic() &&
+      !method->IsPolymorphicSignature()) {
+    Intrinsics intrinsic = static_cast<Intrinsics>(method->GetIntrinsic());
+    SetIntrinsic(intrinsic,
+                 NeedsEnvironmentOrCacheIntrinsic(intrinsic),
+                 GetSideEffectsIntrinsic(intrinsic),
+                 GetExceptionsIntrinsic(intrinsic));
+  }
+  resolved_method_ = method;
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 68f1a24..c70674b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,9 +26,11 @@
 #include "base/arena_object.h"
 #include "base/array_ref.h"
 #include "base/iteration_range.h"
+#include "base/mutex.h"
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
+#include "art_method.h"
 #include "data_type.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file.h"
@@ -128,6 +130,7 @@
   kAnalysisInvalidBytecode,
   kAnalysisFailThrowCatchLoop,
   kAnalysisFailAmbiguousArrayOp,
+  kAnalysisFailIrreducibleLoopAndStringInit,
   kAnalysisSuccess,
 };
 
@@ -314,6 +317,7 @@
          uint32_t method_idx,
          InstructionSet instruction_set,
          InvokeType invoke_type = kInvalidInvokeType,
+         bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
          int start_instruction_id = 0)
@@ -333,6 +337,7 @@
         has_simd_(false),
         has_loops_(false),
         has_irreducible_loops_(false),
+        dead_reference_safe_(dead_reference_safe),
         debuggable_(debuggable),
         current_instruction_id_(start_instruction_id),
         dex_file_(dex_file),
@@ -523,6 +528,12 @@
     has_bounds_checks_ = value;
   }
 
+  // Is the code known to be robust against eliminating dead references
+  // and the effects of early finalization?
+  bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
+
+  void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }
+
   bool IsDebuggable() const { return debuggable_; }
 
   // Returns a constant of the given type and value. If it does not exist
@@ -701,6 +712,14 @@
   // so there might be false positives.
   bool has_irreducible_loops_;
 
+  // Is the code known to be robust against eliminating dead references
+  // and the effects of early finalization? If false, dead reference variables
+  // are kept if they might be visible to the garbage collector.
+  // Currently this means that the class was declared to be dead-reference-safe,
+  // the method accesses no reachability-sensitive fields or data, and the same
+  // is true for any methods that were inlined into the current one.
+  bool dead_reference_safe_;
+
   // Indicates whether the graph should be compiled in a way that
   // ensures full debuggability. If false, we can apply more
   // aggressive optimizations that may limit the level of debugging.
@@ -892,7 +911,7 @@
   explicit TryCatchInformation(const HTryBoundary& try_entry)
       : try_entry_(&try_entry),
         catch_dex_file_(nullptr),
-        catch_type_index_(DexFile::kDexNoIndex16) {
+        catch_type_index_(dex::TypeIndex::Invalid()) {
     DCHECK(try_entry_ != nullptr);
   }
 
@@ -911,9 +930,9 @@
 
   bool IsCatchBlock() const { return catch_dex_file_ != nullptr; }
 
-  bool IsCatchAllTypeIndex() const {
+  bool IsValidTypeIndex() const {
     DCHECK(IsCatchBlock());
-    return !catch_type_index_.IsValid();
+    return catch_type_index_.IsValid();
   }
 
   dex::TypeIndex GetCatchTypeIndex() const {
@@ -926,6 +945,10 @@
     return *catch_dex_file_;
   }
 
+  void SetInvalidTypeIndex() {
+    catch_type_index_ = dex::TypeIndex::Invalid();
+  }
+
  private:
   // One of possibly several TryBoundary instructions entering the block's try.
   // Only set for try blocks.
@@ -933,7 +956,7 @@
 
   // Exception type information. Only set for catch blocks.
   const DexFile* catch_dex_file_;
-  const dex::TypeIndex catch_type_index_;
+  dex::TypeIndex catch_type_index_;
 };
 
 static constexpr size_t kNoLifetime = -1;
@@ -1453,6 +1476,7 @@
   M(VecSetScalars, VecOperation)                                        \
   M(VecMultiplyAccumulate, VecOperation)                                \
   M(VecSADAccumulate, VecOperation)                                     \
+  M(VecDotProd, VecOperation)                                           \
   M(VecLoad, VecMemoryOperation)                                        \
   M(VecStore, VecMemoryOperation)                                       \
 
@@ -1494,6 +1518,14 @@
   M(X86PackedSwitch, Instruction)
 #endif
 
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)                     \
+  M(X86AndNot, Instruction)                                                \
+  M(X86MaskOrResetLeastSetBit, Instruction)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
+#endif
+
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
 
 #define FOR_EACH_CONCRETE_INSTRUCTION(M)                                \
@@ -1504,7 +1536,8 @@
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)                                 \
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                                  \
-  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)                               \
+  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
 
 #define FOR_EACH_ABSTRACT_INSTRUCTION(M)                                \
   M(Condition, BinaryOperation)                                         \
@@ -3229,7 +3262,7 @@
             SideEffects::All(),
             dex_pc,
             allocator,
-            /* number_of_inputs */ 1,
+            /* number_of_inputs= */ 1,
             kArenaAllocMisc) {
     SetPackedFlag<kFieldCanBeMoved>(false);
     SetPackedField<DeoptimizeKindField>(kind);
@@ -3254,7 +3287,7 @@
             SideEffects::CanTriggerGC(),
             dex_pc,
             allocator,
-            /* number_of_inputs */ 2,
+            /* number_of_inputs= */ 2,
             kArenaAllocMisc) {
     SetPackedFlag<kFieldCanBeMoved>(true);
     SetPackedField<DeoptimizeKindField>(kind);
@@ -4322,7 +4355,7 @@
   bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
 
   ArtMethod* GetResolvedMethod() const { return resolved_method_; }
-  void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
+  void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
   DECLARE_ABSTRACT_INSTRUCTION(Invoke);
 
@@ -4354,12 +4387,14 @@
           number_of_arguments + number_of_other_inputs,
           kArenaAllocInvokeInputs),
       number_of_arguments_(number_of_arguments),
-      resolved_method_(resolved_method),
       dex_method_index_(dex_method_index),
       intrinsic_(Intrinsics::kNone),
       intrinsic_optimizations_(0) {
     SetPackedField<InvokeTypeField>(invoke_type);
     SetPackedFlag<kFlagCanThrow>(true);
+    // Check mutator lock, constructors lack annotalysis support.
+    Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
+    SetResolvedMethod(resolved_method);
   }
 
   DEFAULT_COPY_CONSTRUCTOR(Invoke);
@@ -4384,7 +4419,7 @@
       : HInvoke(kInvokeUnresolved,
                 allocator,
                 number_of_arguments,
-                0u /* number_of_other_inputs */,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
                 dex_method_index,
@@ -4410,7 +4445,7 @@
       : HInvoke(kInvokePolymorphic,
                 allocator,
                 number_of_arguments,
-                0u /* number_of_other_inputs */,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
                 dex_method_index,
@@ -4436,11 +4471,11 @@
       : HInvoke(kInvokeCustom,
                 allocator,
                 number_of_arguments,
-                /* number_of_other_inputs */ 0u,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
-                /* dex_method_index */ dex::kDexNoIndex,
-                /* resolved_method */ nullptr,
+                /* dex_method_index= */ dex::kDexNoIndex,
+                /* resolved_method= */ nullptr,
                 kStatic),
       call_site_index_(call_site_index) {
   }
@@ -4533,8 +4568,7 @@
                 allocator,
                 number_of_arguments,
                 // There is potentially one extra argument for the HCurrentMethod node, and
-                // potentially one other if the clinit check is explicit, and potentially
-                // one other if the method is a string factory.
+                // potentially one other if the clinit check is explicit.
                 (NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
                     (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
                 return_type,
@@ -4845,10 +4879,11 @@
 
 class HNewArray final : public HExpression<2> {
  public:
-  HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+  HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
       : HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
     SetRawInputAt(0, cls);
     SetRawInputAt(1, length);
+    SetPackedField<ComponentSizeShiftField>(component_size_shift);
   }
 
   bool IsClonable() const override { return true; }
@@ -4870,10 +4905,23 @@
     return InputAt(1);
   }
 
+  size_t GetComponentSizeShift() const {
+    return GetPackedField<ComponentSizeShiftField>();
+  }
+
   DECLARE_INSTRUCTION(NewArray);
 
  protected:
   DEFAULT_COPY_CONSTRUCTOR(NewArray);
+
+ private:
+  static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
+  static constexpr size_t kNumberOfNewArrayPackedBits =
+      kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
+  static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using ComponentSizeShiftField =
+      BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShiftSize>;
 };
 
 class HAdd final : public HBinaryOperation {
@@ -5656,6 +5704,10 @@
   bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
+  // Return whether the conversion is implicit. This includes conversion to the same type.
+  bool IsImplicitConversion() const {
+    return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+  }
 
   // Try to statically evaluate the conversion and return a HConstant
   // containing the result.  If the input cannot be converted, return nullptr.
@@ -5862,7 +5914,7 @@
                  type,
                  SideEffects::ArrayReadOfType(type),
                  dex_pc,
-                 /* is_string_char_at */ false) {
+                 /* is_string_char_at= */ false) {
   }
 
   HArrayGet(HInstruction* array,
@@ -6136,6 +6188,9 @@
 
  private:
   static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
+  static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
+  static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+                "Too many packed fields.");
 };
 
 class HSuspendCheck final : public HExpression<0> {
@@ -6301,7 +6356,7 @@
   ReferenceTypeInfo GetLoadedClassRTI() {
     if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
       // Note: The is_exact flag from the return value should not be used.
-      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
     } else {
       return ReferenceTypeInfo::CreateInvalid();
     }
@@ -7054,7 +7109,7 @@
           side_effects,
           dex_pc,
           allocator,
-          /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
+          /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
           kArenaAllocTypeCheckInputs),
         klass_(klass) {
     SetPackedField<TypeCheckKindField>(check_kind);
@@ -7110,7 +7165,7 @@
   ReferenceTypeInfo GetTargetClassRTI() {
     if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
       // Note: The is_exact flag from the return value should not be used.
-      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
     } else {
       return ReferenceTypeInfo::CreateInvalid();
     }
@@ -7377,7 +7432,7 @@
 //     }
 //
 // See also:
-// * CompilerDriver::RequiresConstructorBarrier
+// * DexCompilationUnit::RequiresConstructorBarrier
 // * QuasiAtomic::ThreadFenceForConstructor
 //
 class HConstructorFence final : public HVariableInputSizeInstruction {
@@ -7423,7 +7478,7 @@
                                       SideEffects::AllReads(),
                                       dex_pc,
                                       allocator,
-                                      /* number_of_inputs */ 1,
+                                      /* number_of_inputs= */ 1,
                                       kArenaAllocConstructorFenceInputs) {
     DCHECK(fence_object != nullptr);
     SetRawInputAt(0, fence_object);
@@ -7741,7 +7796,7 @@
 #ifdef ART_ENABLE_CODEGEN_mips
 #include "nodes_mips.h"
 #endif
-#ifdef ART_ENABLE_CODEGEN_x86
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
 #include "nodes_x86.h"
 #endif
 
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index c7539f2..efe4d6b 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -207,7 +207,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 1,
+                      /* number_of_inputs= */ 1,
                       vector_length,
                       dex_pc) {
     SetRawInputAt(0, input);
@@ -235,7 +235,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 2,
+                      /* number_of_inputs= */ 2,
                       vector_length,
                       dex_pc) {
     SetRawInputAt(0, left);
@@ -384,21 +384,21 @@
              HInstruction* input,
              DataType::Type packed_type,
              size_t vector_length,
-             ReductionKind kind,
+             ReductionKind reduction_kind,
              uint32_t dex_pc)
       : HVecUnaryOperation(kVecReduce, allocator, input, packed_type, vector_length, dex_pc),
-        kind_(kind) {
+        reduction_kind_(reduction_kind) {
     DCHECK(HasConsistentPackedTypes(input, packed_type));
   }
 
-  ReductionKind GetKind() const { return kind_; }
+  ReductionKind GetReductionKind() const { return reduction_kind_; }
 
   bool CanBeMoved() const override { return true; }
 
   bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecReduce());
     const HVecReduce* o = other->AsVecReduce();
-    return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
+    return HVecOperation::InstructionDataEquals(o) && GetReductionKind() == o->GetReductionKind();
   }
 
   DECLARE_INSTRUCTION(VecReduce);
@@ -407,7 +407,7 @@
   DEFAULT_COPY_CONSTRUCTOR(VecReduce);
 
  private:
-  const ReductionKind kind_;
+  const ReductionKind reduction_kind_;
 };
 
 // Converts every component in the vector,
@@ -948,7 +948,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 3,
+                      /* number_of_inputs= */ 3,
                       vector_length,
                       dex_pc),
         op_kind_(op) {
@@ -1002,7 +1002,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 3,
+                      /* number_of_inputs= */ 3,
                       vector_length,
                       dex_pc) {
     DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1021,6 +1021,66 @@
   DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
 };
 
+// Performs dot product of two vectors and adds the result to wider precision components in
+// the accumulator.
+//
+// viz. DOT_PRODUCT([ a1, .. , am], [ x1, .. , xn ], [ y1, .. , yn ]) =
+//                  [ a1 + sum(xi * yi), .. , am + sum(xj * yj) ],
+//      for m <= n, non-overlapping sums,
+//      for either both signed or both unsigned operands x, y.
+//
+// Notes:
+//   - packed type reflects the type of sum reduction, not the type of the operands.
+//   - IsZeroExtending() is used to determine the kind of signed/zero extension to be
+//     performed for the operands.
+//
+// TODO: Support types other than kInt32 for packed type.
+class HVecDotProd final : public HVecOperation {
+ public:
+  HVecDotProd(ArenaAllocator* allocator,
+              HInstruction* accumulator,
+              HInstruction* left,
+              HInstruction* right,
+              DataType::Type packed_type,
+              bool is_zero_extending,
+              size_t vector_length,
+              uint32_t dex_pc)
+    : HVecOperation(kVecDotProd,
+                    allocator,
+                    packed_type,
+                    SideEffects::None(),
+                    /* number_of_inputs= */ 3,
+                    vector_length,
+                    dex_pc) {
+    DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
+    DCHECK(DataType::IsIntegralType(packed_type));
+    DCHECK(left->IsVecOperation());
+    DCHECK(right->IsVecOperation());
+    DCHECK_EQ(ToSignedType(left->AsVecOperation()->GetPackedType()),
+              ToSignedType(right->AsVecOperation()->GetPackedType()));
+    SetRawInputAt(0, accumulator);
+    SetRawInputAt(1, left);
+    SetRawInputAt(2, right);
+    SetPackedFlag<kFieldHDotProdIsZeroExtending>(is_zero_extending);
+  }
+
+  bool IsZeroExtending() const { return GetPackedFlag<kFieldHDotProdIsZeroExtending>(); }
+
+  bool CanBeMoved() const override { return true; }
+
+  DECLARE_INSTRUCTION(VecDotProd);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(VecDotProd);
+
+ private:
+  // Additional packed bits.
+  static constexpr size_t kFieldHDotProdIsZeroExtending =
+      HVecOperation::kNumberOfVectorOpPackedBits;
+  static constexpr size_t kNumberOfHDotProdPackedBits = kFieldHDotProdIsZeroExtending + 1;
+  static_assert(kNumberOfHDotProdPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+};
+
 // Loads a vector from memory, viz. load(mem, 1)
 // yield the vector [ mem(1), .. , mem(n) ].
 class HVecLoad final : public HVecMemoryOperation {
@@ -1037,7 +1097,7 @@
                             allocator,
                             packed_type,
                             side_effects,
-                            /* number_of_inputs */ 2,
+                            /* number_of_inputs= */ 2,
                             vector_length,
                             dex_pc) {
     SetRawInputAt(0, base);
@@ -1083,7 +1143,7 @@
                             allocator,
                             packed_type,
                             side_effects,
-                            /* number_of_inputs */ 3,
+                            /* number_of_inputs= */ 3,
                             vector_length,
                             dex_pc) {
     DCHECK(HasConsistentPackedTypes(value, packed_type));
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index af13449..b0a665d 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -401,9 +401,9 @@
   EXPECT_TRUE(v2->CanBeMoved());
   EXPECT_TRUE(v3->CanBeMoved());
 
-  EXPECT_EQ(HVecReduce::kSum, v1->GetKind());
-  EXPECT_EQ(HVecReduce::kMin, v2->GetKind());
-  EXPECT_EQ(HVecReduce::kMax, v3->GetKind());
+  EXPECT_EQ(HVecReduce::kSum, v1->GetReductionKind());
+  EXPECT_EQ(HVecReduce::kMin, v2->GetReductionKind());
+  EXPECT_EQ(HVecReduce::kMax, v3->GetReductionKind());
 
   EXPECT_TRUE(v1->Equals(v1));
   EXPECT_TRUE(v2->Equals(v2));
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index a551104..8e8fbc1 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -128,6 +128,92 @@
   const int32_t num_entries_;
 };
 
+class HX86AndNot final : public HBinaryOperation {
+ public:
+  HX86AndNot(DataType::Type result_type,
+       HInstruction* left,
+       HInstruction* right,
+       uint32_t dex_pc = kNoDexPc)
+      : HBinaryOperation(kX86AndNot, result_type, left, right, SideEffects::None(), dex_pc) {
+  }
+
+  bool IsCommutative() const override { return false; }
+
+  template <typename T> static T Compute(T x, T y) { return ~x & y; }
+
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
+    LOG(FATAL) << DebugName() << " is not defined for float values";
+    UNREACHABLE();
+  }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
+    LOG(FATAL) << DebugName() << " is not defined for double values";
+    UNREACHABLE();
+  }
+
+  DECLARE_INSTRUCTION(X86AndNot);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(X86AndNot);
+};
+
+class HX86MaskOrResetLeastSetBit final : public HUnaryOperation {
+ public:
+  HX86MaskOrResetLeastSetBit(DataType::Type result_type, InstructionKind op,
+                             HInstruction* input, uint32_t dex_pc = kNoDexPc)
+      : HUnaryOperation(kX86MaskOrResetLeastSetBit, result_type, input, dex_pc),
+        op_kind_(op) {
+    DCHECK_EQ(result_type, DataType::Kind(input->GetType()));
+    DCHECK(op == HInstruction::kAnd || op == HInstruction::kXor) << op;
+  }
+  template <typename T>
+  auto Compute(T x) const -> decltype(x & (x-1)) {
+    static_assert(std::is_same<decltype(x & (x-1)), decltype(x ^(x-1))>::value,
+                  "Inconsistent bitwise types");
+    switch (op_kind_) {
+      case HInstruction::kAnd:
+        return x & (x-1);
+      case HInstruction::kXor:
+        return x ^ (x-1);
+      default:
+        LOG(FATAL) << "Unreachable";
+        UNREACHABLE();
+    }
+  }
+
+  HConstant* Evaluate(HIntConstant* x) const override {
+    return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x) const override {
+    return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
+    LOG(FATAL) << DebugName() << " is not defined for float values";
+    UNREACHABLE();
+  }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
+    LOG(FATAL) << DebugName() << " is not defined for double values";
+    UNREACHABLE();
+  }
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(X86MaskOrResetLeastSetBit);
+
+ protected:
+  const InstructionKind op_kind_;
+
+  DEFAULT_COPY_CONSTRUCTOR(X86MaskOrResetLeastSetBit);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 142ddb5..8864a12 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -28,10 +28,14 @@
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "pc_relative_fixups_x86.h"
+#include "instruction_simplifier_x86.h"
 #endif
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
 #include "x86_memory_gen.h"
 #endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "instruction_simplifier_x86_64.h"
+#endif
 
 #include "bounds_check_elimination.h"
 #include "cha_guard_optimization.h"
@@ -84,14 +88,10 @@
       return HDeadCodeElimination::kDeadCodeEliminationPassName;
     case OptimizationPass::kInliner:
       return HInliner::kInlinerPassName;
-    case OptimizationPass::kSharpening:
-      return HSharpening::kSharpeningPassName;
     case OptimizationPass::kSelectGenerator:
       return HSelectGenerator::kSelectGeneratorPassName;
     case OptimizationPass::kInstructionSimplifier:
       return InstructionSimplifier::kInstructionSimplifierPassName;
-    case OptimizationPass::kIntrinsicsRecognizer:
-      return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName;
     case OptimizationPass::kCHAGuardOptimization:
       return CHAGuardOptimization::kCHAGuardOptimizationPassName;
     case OptimizationPass::kCodeSinking:
@@ -117,6 +117,12 @@
 #ifdef ART_ENABLE_CODEGEN_x86
     case OptimizationPass::kPcRelativeFixupsX86:
       return x86::PcRelativeFixups::kPcRelativeFixupsX86PassName;
+    case OptimizationPass::kInstructionSimplifierX86:
+      return x86::InstructionSimplifierX86::kInstructionSimplifierX86PassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+    case OptimizationPass::kInstructionSimplifierX86_64:
+      return x86_64::InstructionSimplifierX86_64::kInstructionSimplifierX86_64PassName;
 #endif
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
     case OptimizationPass::kX86MemoryOperandGeneration:
@@ -141,14 +147,12 @@
   X(OptimizationPass::kInductionVarAnalysis);
   X(OptimizationPass::kInliner);
   X(OptimizationPass::kInstructionSimplifier);
-  X(OptimizationPass::kIntrinsicsRecognizer);
   X(OptimizationPass::kInvariantCodeMotion);
   X(OptimizationPass::kLoadStoreAnalysis);
   X(OptimizationPass::kLoadStoreElimination);
   X(OptimizationPass::kLoopOptimization);
   X(OptimizationPass::kScheduling);
   X(OptimizationPass::kSelectGenerator);
-  X(OptimizationPass::kSharpening);
   X(OptimizationPass::kSideEffectsAnalysis);
 #ifdef ART_ENABLE_CODEGEN_arm
   X(OptimizationPass::kInstructionSimplifierArm);
@@ -177,7 +181,6 @@
     HGraph* graph,
     OptimizingCompilerStats* stats,
     CodeGenerator* codegen,
-    CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
     VariableSizedHandleScope* handles) {
   ArenaVector<HOptimization*> optimizations(allocator->Adapter());
@@ -254,28 +257,21 @@
                                        codegen,
                                        dex_compilation_unit,    // outer_compilation_unit
                                        dex_compilation_unit,    // outermost_compilation_unit
-                                       driver,
                                        handles,
                                        stats,
                                        accessor.RegistersSize(),
-                                       /* total_number_of_instructions */ 0,
-                                       /* parent */ nullptr,
-                                       /* depth */ 0,
+                                       /* total_number_of_instructions= */ 0,
+                                       /* parent= */ nullptr,
+                                       /* depth= */ 0,
                                        pass_name);
         break;
       }
-      case OptimizationPass::kSharpening:
-        opt = new (allocator) HSharpening(graph, codegen, pass_name);
-        break;
       case OptimizationPass::kSelectGenerator:
         opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name);
         break;
       case OptimizationPass::kInstructionSimplifier:
         opt = new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name);
         break;
-      case OptimizationPass::kIntrinsicsRecognizer:
-        opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name);
-        break;
       case OptimizationPass::kCHAGuardOptimization:
         opt = new (allocator) CHAGuardOptimization(graph, pass_name);
         break;
@@ -323,6 +319,14 @@
         DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
         opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
         break;
+      case OptimizationPass::kInstructionSimplifierX86:
+       opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+       break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+      case OptimizationPass::kInstructionSimplifierX86_64:
+        opt = new (allocator) x86_64::InstructionSimplifierX86_64(graph, codegen, stats);
+        break;
 #endif
       case OptimizationPass::kNone:
         LOG(FATAL) << "kNone does not represent an actual pass";
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 88b283c..ce44b5f 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -77,14 +77,12 @@
   kInductionVarAnalysis,
   kInliner,
   kInstructionSimplifier,
-  kIntrinsicsRecognizer,
   kInvariantCodeMotion,
   kLoadStoreAnalysis,
   kLoadStoreElimination,
   kLoopOptimization,
   kScheduling,
   kSelectGenerator,
-  kSharpening,
   kSideEffectsAnalysis,
 #ifdef ART_ENABLE_CODEGEN_arm
   kInstructionSimplifierArm,
@@ -98,6 +96,10 @@
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
   kPcRelativeFixupsX86,
+  kInstructionSimplifierX86,
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+  kInstructionSimplifierX86_64,
 #endif
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
   kX86MemoryOperandGeneration,
@@ -145,7 +147,6 @@
     HGraph* graph,
     OptimizingCompilerStats* stats,
     CodeGenerator* codegen,
-    CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
     VariableSizedHandleScope* handles);
 
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index be1f7ea..a52031c 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -128,7 +128,7 @@
    public:
     InternalCodeAllocator() {}
 
-    virtual uint8_t* Allocate(size_t size) {
+    uint8_t* Allocate(size_t size) override {
       memory_.resize(size);
       return memory_.data();
     }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0a74705..e8f8d32 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -26,6 +26,7 @@
 #include "base/arena_allocator.h"
 #include "base/arena_containers.h"
 #include "base/dumpable.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/scoped_arena_allocator.h"
@@ -79,7 +80,7 @@
   explicit CodeVectorAllocator(ArenaAllocator* allocator)
       : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
 
-  virtual uint8_t* Allocate(size_t size) {
+  uint8_t* Allocate(size_t size) override {
     memory_.resize(size);
     return &memory_[0];
   }
@@ -161,7 +162,7 @@
     VLOG(compiler) << "Starting pass: " << pass_name;
     // Dump graph first, then start timer.
     if (visualizer_enabled_) {
-      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
       FlushVisualizer();
     }
     if (timing_logger_enabled_) {
@@ -183,7 +184,7 @@
       timing_logger_.EndTiming();
     }
     if (visualizer_enabled_) {
-      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
       FlushVisualizer();
     }
 
@@ -271,7 +272,7 @@
 
   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
 
-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+  CompiledMethod* Compile(const dex::CodeItem* code_item,
                           uint32_t access_flags,
                           InvokeType invoke_type,
                           uint16_t class_def_idx,
@@ -298,6 +299,7 @@
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
                   ArtMethod* method,
+                  bool baseline,
                   bool osr,
                   jit::JitLogger* jit_logger)
       override
@@ -319,7 +321,6 @@
         graph,
         compilation_stats_.get(),
         codegen,
-        GetCompilerDriver(),
         dex_compilation_unit,
         handles);
     DCHECK_EQ(length, optimizations.size());
@@ -369,7 +370,7 @@
   CompiledMethod* Emit(ArenaAllocator* allocator,
                        CodeVectorAllocator* code_allocator,
                        CodeGenerator* codegen,
-                       const DexFile::CodeItem* item) const;
+                       const dex::CodeItem* item) const;
 
   // Try compiling a method and return the code generator used for
   // compiling it.
@@ -383,6 +384,7 @@
                             CodeVectorAllocator* code_allocator,
                             const DexCompilationUnit& dex_compilation_unit,
                             ArtMethod* method,
+                            bool baseline,
                             bool osr,
                             VariableSizedHandleScope* handles) const;
 
@@ -399,7 +401,14 @@
                             PassObserver* pass_observer,
                             VariableSizedHandleScope* handles) const;
 
-  void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info)
+  bool RunBaselineOptimizations(HGraph* graph,
+                                CodeGenerator* codegen,
+                                const DexCompilationUnit& dex_compilation_unit,
+                                PassObserver* pass_observer,
+                                VariableSizedHandleScope* handles) const;
+
+  void GenerateJitDebugInfo(ArtMethod* method,
+                            const debug::MethodDebugInfo& method_debug_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -456,6 +465,48 @@
       || instruction_set == InstructionSet::kX86_64;
 }
 
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+                                                  CodeGenerator* codegen,
+                                                  const DexCompilationUnit& dex_compilation_unit,
+                                                  PassObserver* pass_observer,
+                                                  VariableSizedHandleScope* handles) const {
+  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+    case InstructionSet::kMips: {
+      OptimizationDef mips_optimizations[] = {
+        OptDef(OptimizationPass::kPcRelativeFixupsMips)
+      };
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips_optimizations);
+    }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case InstructionSet::kX86: {
+      OptimizationDef x86_optimizations[] = {
+        OptDef(OptimizationPass::kPcRelativeFixupsX86),
+      };
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_optimizations);
+    }
+#endif
+    default:
+      UNUSED(graph);
+      UNUSED(codegen);
+      UNUSED(dex_compilation_unit);
+      UNUSED(pass_observer);
+      UNUSED(handles);
+      return false;
+  }
+}
+
 bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                               CodeGenerator* codegen,
                                               const DexCompilationUnit& dex_compilation_unit,
@@ -528,6 +579,7 @@
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       OptimizationDef x86_optimizations[] = {
+        OptDef(OptimizationPass::kInstructionSimplifierX86),
         OptDef(OptimizationPass::kSideEffectsAnalysis),
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kPcRelativeFixupsX86),
@@ -544,6 +596,7 @@
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case InstructionSet::kX86_64: {
       OptimizationDef x86_64_optimizations[] = {
+        OptDef(OptimizationPass::kInstructionSimplifierX86_64),
         OptDef(OptimizationPass::kSideEffectsAnalysis),
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
@@ -623,8 +676,6 @@
 
   OptimizationDef optimizations[] = {
     // Initial optimizations.
-    OptDef(OptimizationPass::kIntrinsicsRecognizer),
-    OptDef(OptimizationPass::kSharpening),
     OptDef(OptimizationPass::kConstantFolding),
     OptDef(OptimizationPass::kInstructionSimplifier),
     OptDef(OptimizationPass::kDeadCodeElimination,
@@ -709,12 +760,12 @@
 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
                                          CodeVectorAllocator* code_allocator,
                                          CodeGenerator* codegen,
-                                         const DexFile::CodeItem* code_item_for_osr_check) const {
+                                         const dex::CodeItem* code_item_for_osr_check) const {
   ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
-      GetCompilerDriver(),
+      GetCompilerDriver()->GetCompiledMethodStorage(),
       codegen->GetInstructionSet(),
       code_allocator->GetMemory(),
       ArrayRef<const uint8_t>(stack_map),
@@ -739,6 +790,7 @@
                                               CodeVectorAllocator* code_allocator,
                                               const DexCompilationUnit& dex_compilation_unit,
                                               ArtMethod* method,
+                                              bool baseline,
                                               bool osr,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -747,7 +799,7 @@
   InstructionSet instruction_set = compiler_options.GetInstructionSet();
   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
-  const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
+  const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
 
   // Always use the Thumb-2 assembler: some runtime functionality
   // (like implicit stack overflow checks) assume Thumb-2.
@@ -776,6 +828,29 @@
   }
 
   CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
+
+  bool dead_reference_safe;
+  ArrayRef<const uint8_t> interpreter_metadata;
+  // For AOT compilation, we may not get a method, for example if its class is erroneous,
+  // possibly due to an unavailable superclass.  JIT should always have a method.
+  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
+  if (method != nullptr) {
+    const dex::ClassDef* containing_class;
+    {
+      ScopedObjectAccess soa(Thread::Current());
+      containing_class = &method->GetClassDef();
+      interpreter_metadata = method->GetQuickenedInfo();
+    }
+    // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+    // is currently rarely true.
+    dead_reference_safe =
+        annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
+        && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
+  } else {
+    // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
+    dead_reference_safe = false;
+  }
+
   HGraph* graph = new (allocator) HGraph(
       allocator,
       arena_stack,
@@ -783,17 +858,12 @@
       method_idx,
       compiler_options.GetInstructionSet(),
       kInvalidInvokeType,
+      dead_reference_safe,
       compiler_driver->GetCompilerOptions().GetDebuggable(),
-      osr);
+      /* osr= */ osr);
 
-  ArrayRef<const uint8_t> interpreter_metadata;
-  // For AOT compilation, we may not get a method, for example if its class is erroneous.
-  // JIT should always have a method.
-  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
   if (method != nullptr) {
     graph->SetArtMethod(method);
-    ScopedObjectAccess soa(Thread::Current());
-    interpreter_metadata = method->GetQuickenedInfo();
   }
 
   std::unique_ptr<CodeGenerator> codegen(
@@ -820,7 +890,6 @@
                           code_item_accessor,
                           &dex_compilation_unit,
                           &dex_compilation_unit,
-                          compiler_driver,
                           codegen.get(),
                           compilation_stats_.get(),
                           interpreter_metadata,
@@ -848,6 +917,11 @@
                           MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
           break;
         }
+        case kAnalysisFailIrreducibleLoopAndStringInit: {
+          MaybeRecordStat(compilation_stats_.get(),
+                          MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
+          break;
+        }
         case kAnalysisSuccess:
           UNREACHABLE();
       }
@@ -856,11 +930,11 @@
     }
   }
 
-  RunOptimizations(graph,
-                   codegen.get(),
-                   dex_compilation_unit,
-                   &pass_observer,
-                   handles);
+  if (baseline) {
+    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+  } else {
+    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+  }
 
   RegisterAllocator::Strategy regalloc_strategy =
     compiler_options.GetRegisterAllocationStrategy();
@@ -905,10 +979,11 @@
       arena_stack,
       dex_file,
       method_idx,
-      compiler_driver->GetCompilerOptions().GetInstructionSet(),
+      compiler_options.GetInstructionSet(),
       kInvalidInvokeType,
-      compiler_driver->GetCompilerOptions().GetDebuggable(),
-      /* osr */ false);
+      /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
+      compiler_options.GetDebuggable(),
+      /* osr= */ false);
 
   DCHECK(Runtime::Current()->IsAotCompiler());
   DCHECK(method != nullptr);
@@ -936,18 +1011,16 @@
                           CodeItemDebugInfoAccessor(),  // Null code item.
                           &dex_compilation_unit,
                           &dex_compilation_unit,
-                          compiler_driver,
                           codegen.get(),
                           compilation_stats_.get(),
-                          /* interpreter_metadata */ ArrayRef<const uint8_t>(),
+                          /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
                           handles);
     builder.BuildIntrinsicGraph(method);
   }
 
   OptimizationDef optimizations[] = {
-    OptDef(OptimizationPass::kIntrinsicsRecognizer),
-    // Some intrinsics are converted to HIR by the simplifier and the codegen also
-    // has a few assumptions that only the instruction simplifier can satisfy.
+    // The codegen has a few assumptions that only the instruction simplifier
+    // can satisfy.
     OptDef(OptimizationPass::kInstructionSimplifier),
   };
   RunOptimizations(graph,
@@ -979,7 +1052,7 @@
   return codegen.release();
 }
 
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                                             uint32_t access_flags,
                                             InvokeType invoke_type,
                                             uint16_t class_def_idx,
@@ -988,12 +1061,13 @@
                                             const DexFile& dex_file,
                                             Handle<mirror::DexCache> dex_cache) const {
   CompilerDriver* compiler_driver = GetCompilerDriver();
+  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
   CompiledMethod* compiled_method = nullptr;
   Runtime* runtime = Runtime::Current();
   DCHECK(runtime->IsAotCompiler());
-  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+  const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
   DCHECK(!verified_method->HasRuntimeThrow());
-  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+  if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
       verifier::CanCompilerHandleVerificationFailure(
           verified_method->GetEncounteredVerificationFailures())) {
     ArenaAllocator allocator(runtime->GetArenaPool());
@@ -1002,6 +1076,15 @@
     std::unique_ptr<CodeGenerator> codegen;
     bool compiled_intrinsic = false;
     {
+      ScopedObjectAccess soa(Thread::Current());
+      ArtMethod* method =
+          runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+              method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
+      DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
+      soa.Self()->ClearException();  // Suppress exception if any.
+      VariableSizedHandleScope handles(soa.Self());
+      Handle<mirror::Class> compiling_class =
+          handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
       DexCompilationUnit dex_compilation_unit(
           jclass_loader,
           runtime->GetClassLinker(),
@@ -1010,16 +1093,13 @@
           class_def_idx,
           method_idx,
           access_flags,
-          /* verified_method */ nullptr,  // Not needed by the Optimizing compiler.
-          dex_cache);
-      ScopedObjectAccess soa(Thread::Current());
-      ArtMethod* method = compiler_driver->ResolveMethod(
-            soa, dex_cache, jclass_loader, &dex_compilation_unit, method_idx, invoke_type);
-      VariableSizedHandleScope handles(soa.Self());
+          /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
+          dex_cache,
+          compiling_class);
       // Go to native so that we don't block GC during compilation.
       ScopedThreadSuspension sts(soa.Self(), kNative);
       if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
-        DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
+        DCHECK(compiler_options.IsBootImage());
         codegen.reset(
             TryCompileIntrinsic(&allocator,
                                 &arena_stack,
@@ -1038,7 +1118,8 @@
                        &code_allocator,
                        dex_compilation_unit,
                        method,
-                       /* osr */ false,
+                       compiler_options.IsBaseline(),
+                       /* osr= */ false,
                        &handles));
       }
     }
@@ -1066,7 +1147,7 @@
     }
   } else {
     MethodCompilationStat method_stat;
-    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+    if (compiler_options.VerifyAtRuntime()) {
       method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
     } else {
       method_stat = MethodCompilationStat::kNotCompiledVerificationError;
@@ -1075,8 +1156,8 @@
   }
 
   if (kIsDebugBuild &&
-      IsCompilingWithCoreImage() &&
-      IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
+      compiler_options.CompilingWithCoreImage() &&
+      IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
     // For testing purposes, we put a special marker on method names
     // that should be compiled with this compiler (when the
     // instruction set is supported). This makes sure we're not
@@ -1099,7 +1180,7 @@
       jni_compiled_method.GetFrameSize(),
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
-      /* num_dex_registers */ 0);
+      /* num_dex_registers= */ 0);
   stack_map_stream->EndMethod();
   return stack_map_stream->Encode();
 }
@@ -1116,21 +1197,23 @@
   if (compiler_options.IsBootImage()) {
     ScopedObjectAccess soa(Thread::Current());
     ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
-        method_idx, dex_cache.Get(), /* class_loader */ nullptr);
+        method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
     if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
+      VariableSizedHandleScope handles(soa.Self());
       ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
+      Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
       DexCompilationUnit dex_compilation_unit(
           class_loader,
           runtime->GetClassLinker(),
           dex_file,
-          /* code_item */ nullptr,
-          /* class_def_idx */ DexFile::kDexNoIndex16,
+          /*code_item=*/ nullptr,
+          /*class_def_idx=*/ DexFile::kDexNoIndex16,
           method_idx,
           access_flags,
-          /* verified_method */ nullptr,
-          dex_cache);
+          /*verified_method=*/ nullptr,
+          dex_cache,
+          compiling_class);
       CodeVectorAllocator code_allocator(&allocator);
-      VariableSizedHandleScope handles(soa.Self());
       // Go to native so that we don't block GC during compilation.
       ScopedThreadSuspension sts(soa.Self(), kNative);
       std::unique_ptr<CodeGenerator> codegen(
@@ -1144,7 +1227,7 @@
         CompiledMethod* compiled_method = Emit(&allocator,
                                                &code_allocator,
                                                codegen.get(),
-                                               /* code_item_for_osr_check */ nullptr);
+                                               /* item= */ nullptr);
         compiled_method->MarkAsIntrinsic();
         return compiled_method;
       }
@@ -1159,45 +1242,27 @@
   ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                            jni_compiled_method);
   return CompiledMethod::SwapAllocCompiledMethod(
-      GetCompilerDriver(),
+      GetCompilerDriver()->GetCompiledMethodStorage(),
       jni_compiled_method.GetInstructionSet(),
       jni_compiled_method.GetCode(),
       ArrayRef<const uint8_t>(stack_map),
       jni_compiled_method.GetCfi(),
-      /* patches */ ArrayRef<const linker::LinkerPatch>());
+      /* patches= */ ArrayRef<const linker::LinkerPatch>());
 }
 
 Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
   return new OptimizingCompiler(driver);
 }
 
-bool IsCompilingWithCoreImage() {
-  const std::string& image = Runtime::Current()->GetImageLocation();
-  return CompilerDriver::IsCoreImageFilename(image);
-}
-
 bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
   // Note: the runtime is null only for unit testing.
   return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
 }
 
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
-  if (!Runtime::Current()->IsAotCompiler()) {
-    // JIT can always encode methods in stack maps.
-    return true;
-  }
-  if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
-    return true;
-  }
-  // TODO(ngeoffray): Support more AOT cases for inlining:
-  // - methods in multidex
-  // - methods in boot image for on-device non-PIC compilation.
-  return false;
-}
-
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
                                     ArtMethod* method,
+                                    bool baseline,
                                     bool osr,
                                     jit::JitLogger* jit_logger) {
   StackHandleScope<3> hs(self);
@@ -1208,7 +1273,7 @@
 
   const DexFile* dex_file = method->GetDexFile();
   const uint16_t class_def_idx = method->GetClassDefIndex();
-  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+  const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
   const uint32_t method_idx = method->GetDexMethodIndex();
   const uint32_t access_flags = method->GetAccessFlags();
 
@@ -1219,7 +1284,7 @@
     const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
     JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
         compiler_options, access_flags, method_idx, *dex_file);
-    ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+    std::vector<Handle<mirror::Object>> roots;
     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
         allocator.Adapter(kArenaAllocCHA));
     ArenaStack arena_stack(runtime->GetJitArenaPool());
@@ -1231,7 +1296,7 @@
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
                                                  stack_map.size(),
-                                                 /* number_of_roots */ 0,
+                                                 /* number_of_roots= */ 0,
                                                  method,
                                                  &stack_map_data,
                                                  &roots_data);
@@ -1251,7 +1316,7 @@
         data_size,
         osr,
         roots,
-        /* has_should_deoptimize_flag */ false,
+        /* has_should_deoptimize_flag= */ false,
         cha_single_implementation_list);
     if (code == nullptr) {
       return false;
@@ -1293,6 +1358,7 @@
 
   std::unique_ptr<CodeGenerator> codegen;
   {
+    Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
     DexCompilationUnit dex_compilation_unit(
         class_loader,
         runtime->GetClassLinker(),
@@ -1301,8 +1367,9 @@
         class_def_idx,
         method_idx,
         access_flags,
-        /* verified_method */ nullptr,
-        dex_cache);
+        /*verified_method=*/ nullptr,
+        dex_cache,
+        compiling_class);
 
     // Go to native so that we don't block GC during compilation.
     ScopedThreadSuspension sts(self, kNative);
@@ -1312,6 +1379,7 @@
                    &code_allocator,
                    dex_compilation_unit,
                    method,
+                   baseline,
                    osr,
                    &handles));
     if (codegen.get() == nullptr) {
@@ -1321,19 +1389,6 @@
 
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
-  // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
-  // executed, this array is not needed.
-  Handle<mirror::ObjectArray<mirror::Object>> roots(
-      hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
-          self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(), number_of_roots)));
-  if (roots == nullptr) {
-    // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
-    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
-    return false;
-  }
   uint8_t* stack_map_data = nullptr;
   uint8_t* roots_data = nullptr;
   uint32_t data_size = code_cache->ReserveData(self,
@@ -1347,7 +1402,14 @@
     return false;
   }
   memcpy(stack_map_data, stack_map.data(), stack_map.size());
-  codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
+  std::vector<Handle<mirror::Object>> roots;
+  codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
+  // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
+  DCHECK(std::all_of(roots.begin(),
+                     roots.end(),
+                     [&handles](Handle<mirror::Object> root){
+                       return handles.Contains(root.GetReference());
+                     }));
 
   const void* code = code_cache->CommitCode(
       self,
@@ -1413,26 +1475,31 @@
   return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) {
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
+                                              const debug::MethodDebugInfo& info) {
   const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
   DCHECK(compiler_options.GenerateAnyDebugInfo());
+  TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
+  {
+    TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
 
-  // If both flags are passed, generate full debug info.
-  const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+    // If both flags are passed, generate full debug info.
+    const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
 
-  // Create entry for the single method that we just compiled.
-  std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
-      compiler_options.GetInstructionSet(),
-      compiler_options.GetInstructionSetFeatures(),
-      mini_debug_info,
-      ArrayRef<const debug::MethodDebugInfo>(&info, 1));
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
-
-  VLOG(jit)
-      << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
-      << " size=" << PrettySize(elf_file.size())
-      << " total_size=" << PrettySize(GetJitNativeDebugInfoMemUsage());
+    // Create entry for the single method that we just compiled.
+    std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
+        compiler_options.GetInstructionSet(),
+        compiler_options.GetInstructionSetFeatures(),
+        mini_debug_info,
+        info);
+    AddNativeDebugInfoForJit(Thread::Current(),
+                             reinterpret_cast<const void*>(info.code_address),
+                             elf_file,
+                             debug::PackElfFileForJIT,
+                             compiler_options.GetInstructionSet(),
+                             compiler_options.GetInstructionSetFeatures());
+  }
+  Runtime::Current()->GetJit()->AddTimingLogger(logger);
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 6ee9c70..f5279e8 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -29,14 +29,7 @@
 
 Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
 
-// Returns whether we are compiling against a "core" image, which
-// is an indicative we are running tests. The compiler will use that
-// information for checking invariants.
-bool IsCompilingWithCoreImage();
-
 bool EncodeArtMethodInInlineInfo(ArtMethod* method);
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
 }  // namespace art
 
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f..ddd57f5 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -22,9 +22,10 @@
 #include <string>
 #include <type_traits>
 
+#include <android-base/logging.h>
+
 #include "base/atomic.h"
 #include "base/globals.h"
-#include "base/logging.h"  // For VLOG_IS_ON.
 
 namespace art {
 
@@ -59,6 +60,7 @@
   kNotCompiledUnsupportedIsa,
   kNotCompiledVerificationError,
   kNotCompiledVerifyAtRuntime,
+  kNotCompiledIrreducibleLoopAndStringInit,
   kInlinedMonomorphicCall,
   kInlinedPolymorphicCall,
   kMonomorphicCall,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f903f82..e5f6941 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -155,7 +155,7 @@
     void* aligned_data = GetAllocator()->Alloc(code_item_size);
     memcpy(aligned_data, &data[0], code_item_size);
     CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
-    const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+    const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(aligned_data);
 
     {
       ScopedObjectAccess soa(Thread::Current());
@@ -165,13 +165,13 @@
       const DexCompilationUnit* dex_compilation_unit =
           new (graph->GetAllocator()) DexCompilationUnit(
               handles_->NewHandle<mirror::ClassLoader>(nullptr),
-              /* class_linker */ nullptr,
+              /* class_linker= */ nullptr,
               graph->GetDexFile(),
               code_item,
-              /* class_def_index */ DexFile::kDexNoIndex16,
-              /* method_idx */ dex::kDexNoIndex,
-              /* access_flags */ 0u,
-              /* verified_method */ nullptr,
+              /* class_def_index= */ DexFile::kDexNoIndex16,
+              /* method_idx= */ dex::kDexNoIndex,
+              /* access_flags= */ 0u,
+              /* verified_method= */ nullptr,
               handles_->NewHandle<mirror::DexCache>(nullptr));
       CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
       HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 399a6d8..a8ab6cd 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -174,8 +174,8 @@
 template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true;
 template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false;
 
-typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>
-    ParallelMoveResolverTestTypes;
+using ParallelMoveResolverTestTypes =
+    ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>;
 
 TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes);
 
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 4b07d5b..4ff293c 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -17,7 +17,6 @@
 #include "pc_relative_fixups_x86.h"
 #include "code_generator_x86.h"
 #include "intrinsics_x86.h"
-#include "runtime.h"
 
 namespace art {
 namespace x86 {
@@ -239,7 +238,7 @@
       case Intrinsics::kIntegerValueOf:
         // This intrinsic can be call free if it loads the address of the boot image object.
         // If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
-        if (Runtime::Current()->UseJitCompilation()) {
+        if (!codegen_->GetCompilerOptions().GetCompilePic()) {
           break;
         }
         FALLTHROUGH_INTENDED;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740..fbdbf9d 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -87,9 +87,9 @@
     if (GetGraph()->GetArtMethod() != char_at_method) {
       ArenaAllocator* allocator = GetGraph()->GetAllocator();
       HEnvironment* environment = new (allocator) HEnvironment(allocator,
-                                                               /* number_of_vregs */ 0u,
+                                                               /* number_of_vregs= */ 0u,
                                                                char_at_method,
-                                                               /* dex_pc */ dex::kDexNoIndex,
+                                                               /* dex_pc= */ dex::kDexNoIndex,
                                                                check);
       check->InsertRawEnvironment(environment);
     }
@@ -304,4 +304,13 @@
   return true;
 }
 
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+  // For simplicity, our code generators don't handle implicit type conversion, so ensure
+  // there are none before hitting codegen.
+  if (instruction->IsImplicitConversion()) {
+    instruction->ReplaceWith(instruction->GetInput());
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256..e0bb76e 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@
   void VisitConstructorFence(HConstructorFence* constructor_fence) override;
   void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
   void VisitDeoptimize(HDeoptimize* deoptimize) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
 
   bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
   bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a9d5902..4929e0a 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -114,9 +114,9 @@
   void VisitCheckCast(HCheckCast* instr) override;
   void VisitBoundType(HBoundType* instr) override;
   void VisitNullCheck(HNullCheck* instr) override;
-  void VisitPhi(HPhi* phi);
+  void VisitPhi(HPhi* phi) override;
 
-  void VisitBasicBlock(HBasicBlock* block);
+  void VisitBasicBlock(HBasicBlock* block) override;
   void ProcessWorklist();
 
  private:
@@ -278,7 +278,7 @@
       if (ShouldCreateBoundType(
             insert_point, receiver, class_rti, start_instruction, start_block)) {
         bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
-        bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+        bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
         start_block->InsertInstructionBefore(bound_type, insert_point);
         // To comply with the RTP algorithm, don't type the bound type just yet, it will
         // be handled in RTPVisitor::VisitBoundType.
@@ -350,7 +350,7 @@
     HBasicBlock* trueBlock = compare->IsEqual()
         ? check->AsIf()->IfTrueSuccessor()
         : check->AsIf()->IfFalseSuccessor();
-    BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+    BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti);
   } else {
     DCHECK(check->IsDeoptimize());
     if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
@@ -427,9 +427,9 @@
       : ifInstruction->IfFalseSuccessor();
 
   ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
-      handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
+      handle_cache_->GetObjectClassHandle(), /* is_exact= */ false);
 
-  BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
+  BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti);
 }
 
 // Returns true if one of the patterns below has been recognized. If so, the
@@ -538,10 +538,10 @@
   {
     ScopedObjectAccess soa(Thread::Current());
     if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
-      class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
+      class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false);
     }
   }
-  BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
+  BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti);
 }
 
 void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -561,7 +561,7 @@
       // Use a null loader, the target method is in a boot classpath dex file.
       Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
       ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-          dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
+          dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect);
       DCHECK(method != nullptr);
       ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
       DCHECK(declaring_class != nullptr);
@@ -571,7 +571,7 @@
           << "Expected String.<init>: " << method->PrettyMethod();
     }
     instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+        ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
   } else if (IsAdmissible(klass)) {
     ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
     is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
@@ -600,12 +600,12 @@
 
 void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
   ScopedObjectAccess soa(Thread::Current());
-  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
   ScopedObjectAccess soa(Thread::Current());
-  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
@@ -614,7 +614,7 @@
     UpdateReferenceTypeInfo(instr,
                             instr->GetTypeIndex(),
                             instr->GetDexFile(),
-                            /* is_exact */ false);
+                            /* is_exact= */ false);
   }
 }
 
@@ -632,7 +632,7 @@
     klass = info.GetField()->LookupResolvedType();
   }
 
-  SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+  SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -665,7 +665,7 @@
     instr->SetValidLoadedClassRTI();
   }
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) {
@@ -682,31 +682,31 @@
 void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
   instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
       handle_cache_->GetMethodHandleClassHandle(),
-      /* is_exact */ true));
+      /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) {
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) {
   DCHECK(instr->GetBlock()->IsCatchBlock());
   TryCatchInformation* catch_info = instr->GetBlock()->GetTryCatchInformation();
 
-  if (catch_info->IsCatchAllTypeIndex()) {
-    instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false));
-  } else {
+  if (catch_info->IsValidTypeIndex()) {
     UpdateReferenceTypeInfo(instr,
                             catch_info->GetCatchTypeIndex(),
                             catch_info->GetCatchDexFile(),
-                            /* is_exact */ false);
+                            /* is_exact= */ false);
+  } else {
+    instr->SetReferenceTypeInfo(
+        ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false));
   }
 }
 
@@ -736,7 +736,7 @@
         // bound type is dead. To not confuse potential other optimizations, we mark
         // the bound as non-exact.
         instr->SetReferenceTypeInfo(
-            ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+            ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false));
       }
     } else {
       // Object not typed yet. Leave BoundType untyped for now rather than
@@ -914,7 +914,7 @@
   ScopedObjectAccess soa(Thread::Current());
   ArtMethod* method = instr->GetResolvedMethod();
   ObjPtr<mirror::Class> klass = (method == nullptr) ? nullptr : method->LookupResolvedReturnType();
-  SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+  SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@
     // bound type is dead. To not confuse potential other optimizations, we mark
     // the bound as non-exact.
     instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+        ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
   }
 }
 
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3..b1f0a1a 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@
     LocationSummary* locations = instruction->GetLocations();
     if (locations->OnlyCallsOnSlowPath()) {
       size_t core_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
       size_t fp_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
       size_t spill_size =
           core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
       maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
     } else if (locations->CallsOnMainAndSlowPath()) {
       // Nothing to spill on the slow path if the main path already clobbers caller-saves.
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
     }
   }
   return maximum_safepoint_spill_size;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003..0d6c5a3 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@
           temp_intervals_.push_back(interval);
           interval->AddTempUse(instruction, i);
           if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
-            interval->AddHighInterval(/* is_temp */ true);
+            interval->AddHighInterval(/* is_temp= */ true);
             LiveInterval* high = interval->GetHighInterval();
             temp_intervals_.push_back(high);
             unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@
   }
 
   if (locations->WillCall()) {
-    BlockRegisters(position, position + 1, /* caller_save_only */ true);
+    BlockRegisters(position, position + 1, /* caller_save_only= */ true);
   }
 
   for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index db6a760..79eb082 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@
   bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
                          const CodeGenerator& codegen) {
     return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
-                                                /* number_of_spill_slots */ 0u,
-                                                /* number_of_out_slots */ 0u,
+                                                /* number_of_spill_slots= */ 0u,
+                                                /* number_of_out_slots= */ 0u,
                                                 codegen,
-                                                /* processing_core_registers */ true,
-                                                /* log_fatal_on_failure */ false);
+                                                /* processing_core_registers= */ true,
+                                                /* log_fatal_on_failure= */ false);
   }
 };
 
@@ -872,9 +872,9 @@
   // Create an interval with lifetime holes.
   static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
   LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
-  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
-  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
-  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
+  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 7));
+  first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 6));
 
   locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
   locations->SetOut(Location::RequiresRegister());
@@ -895,9 +895,9 @@
   // before lifetime position 6 yet.
   static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
   LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
-  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
-  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
-  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 4));
+  third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 3));
   locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
   locations->SetOut(Location::RequiresRegister());
   third = third->SplitAt(3);
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4..fdef45e 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@
   DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
   DCHECK(!instruction->IsControlFlow());
   DCHECK(!cursor->IsControlFlow());
-  instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+  instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
 }
 
 void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d117..858a555 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@
     last_visited_internal_latency_ = kArmIntegerOpLatency;
     last_visited_latency_ = kArmIntegerOpLatency;
   } else {
-    HandleGenerateDataProcInstruction(/* internal_latency */ true);
+    HandleGenerateDataProcInstruction(/* internal_latency= */ true);
     HandleGenerateDataProcInstruction();
   }
 }
@@ -585,8 +585,8 @@
     DCHECK_LT(shift_value, 32U);
 
     if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
-      HandleGenerateDataProcInstruction(/* internal_latency */ true);
-      HandleGenerateDataProcInstruction(/* internal_latency */ true);
+      HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+      HandleGenerateDataProcInstruction(/* internal_latency= */ true);
       HandleGenerateDataProcInstruction();
     } else {
       last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc4..e0e265a 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@
 
     SchedulingGraph scheduling_graph(scheduler,
                                      GetScopedAllocator(),
-                                     /* heap_location_collector */ nullptr);
+                                     /* heap_location_collector= */ nullptr);
     // Instructions must be inserted in reverse order into the scheduling graph.
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 5c2f57e..885a08d 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -35,22 +35,6 @@
 
 namespace art {
 
-bool HSharpening::Run() {
-  // We don't care about the order of the blocks here.
-  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
-    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
-      HInstruction* instruction = it.Current();
-      if (instruction->IsInvokeStaticOrDirect()) {
-        SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
-      }
-      // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
-      //       here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
-      //       because we know the type better when inlining.
-    }
-  }
-  return true;
-}
-
 static bool IsInBootImage(ArtMethod* method) {
   const std::vector<gc::space::ImageSpace*>& image_spaces =
       Runtime::Current()->GetHeap()->GetBootImageSpaces();
@@ -72,17 +56,14 @@
   return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
 }
 
-void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
-                                              CodeGenerator* codegen) {
-  if (invoke->IsStringInit()) {
-    // Not using the dex cache arrays. But we could still try to use a better dispatch...
-    // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
-    return;
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+    ArtMethod* callee, CodeGenerator* codegen) {
+  if (kIsDebugBuild) {
+    ScopedObjectAccess soa(Thread::Current());  // Required for GetDeclaringClass below.
+    DCHECK(callee != nullptr);
+    DCHECK(!(callee->IsConstructor() && callee->GetDeclaringClass()->IsStringClass()));
   }
 
-  ArtMethod* callee = invoke->GetResolvedMethod();
-  DCHECK(callee != nullptr);
-
   HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
   uint64_t method_load_data = 0u;
@@ -141,9 +122,7 @@
   HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
       method_load_kind, code_ptr_location, method_load_data
   };
-  HInvokeStaticOrDirect::DispatchInfo dispatch_info =
-      codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
-  invoke->SetDispatchInfo(dispatch_info);
+  return codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, callee);
 }
 
 HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
@@ -254,7 +233,7 @@
 
   // Try to assign a type check bitstring.
   MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
-  if ((false) &&  // FIXME: Inliner does not respect CompilerDriver::IsClassToCompile()
+  if ((false) &&  // FIXME: Inliner does not respect CompilerDriver::ShouldCompileMethod()
                   // and we're hitting an unassigned bitstring in dex2oat_image_test. b/26687569
       kIsDebugBuild &&
       codegen->GetCompilerOptions().IsBootImage() &&
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index dc55eea..b818672 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -25,24 +25,13 @@
 class CodeGenerator;
 class DexCompilationUnit;
 
-// Optimization that tries to improve the way we dispatch methods and access types,
-// fields, etc. Besides actual method sharpening based on receiver type (for example
-// virtual->direct), this includes selecting the best available dispatch for
-// invoke-static/-direct based on code generator support.
-class HSharpening : public HOptimization {
+// Utility methods that try to improve the way we dispatch methods, and access
+// types and strings.
+class HSharpening {
  public:
-  HSharpening(HGraph* graph,
-              CodeGenerator* codegen,
-              const char* name = kSharpeningPassName)
-      : HOptimization(graph, name),
-        codegen_(codegen) { }
-
-  bool Run() override;
-
-  static constexpr const char* kSharpeningPassName = "sharpening";
-
-  // Used by Sharpening and InstructionSimplifier.
-  static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+  // Used by the builder and InstructionSimplifier.
+  static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+      ArtMethod* callee, CodeGenerator* codegen);
 
   // Used by the builder and the inliner.
   static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
@@ -61,9 +50,6 @@
                                 CodeGenerator* codegen,
                                 const DexCompilationUnit& dex_compilation_unit,
                                 VariableSizedHandleScope* handles);
-
- private:
-  CodeGenerator* codegen_;
 };
 
 }  // namespace art
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07..cf26e79 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@
 
 TEST(SideEffectsTest, VolatileDependences) {
   SideEffects volatile_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
   SideEffects any_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
   SideEffects volatile_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
   SideEffects any_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
 
   EXPECT_FALSE(volatile_write.MayDependOn(any_read));
   EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@
 TEST(SideEffectsTest, SameWidthTypesNoAlias) {
   // Type I/F.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   // Type L/D.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@
   SideEffects s = SideEffects::None();
   // Keep taking the union of different writes and reads.
   for (DataType::Type type : kTestTypes) {
-    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayWriteOfType(type));
-    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayReadOfType(type));
   }
   EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@
       "||I|||||",
       SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
   SideEffects s = SideEffects::None();
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
-  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
   EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index dda29a1..0d0e1ec 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -16,6 +16,9 @@
 
 #include "ssa_builder.h"
 
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/logging.h"
 #include "data_type-inl.h"
 #include "dex/bytecode_utils.h"
 #include "mirror/class-inl.h"
@@ -388,7 +391,7 @@
           // succeed in code validated by the verifier.
           HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
           DCHECK(equivalent != nullptr);
-          aset->ReplaceInput(equivalent, /* input_index */ 2);
+          aset->ReplaceInput(equivalent, /* index= */ 2);
           if (equivalent->IsPhi()) {
             // Returned equivalent is a phi which may not have had its inputs
             // replaced yet. We need to run primitive type propagation on it.
@@ -415,85 +418,36 @@
   return true;
 }
 
-static bool HasAliasInEnvironments(HInstruction* instruction) {
-  HEnvironment* last_user = nullptr;
+bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) {
+  ScopedArenaHashSet<size_t> seen_users(
+      local_allocator_->Adapter(kArenaAllocGraphBuilder));
   for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
     DCHECK(use.GetUser() != nullptr);
-    // Note: The first comparison (== null) always fails.
-    if (use.GetUser() == last_user) {
+    size_t id = use.GetUser()->GetHolder()->GetId();
+    if (seen_users.find(id) != seen_users.end()) {
       return true;
     }
-    last_user = use.GetUser();
-  }
-
-  if (kIsDebugBuild) {
-    // Do a quadratic search to ensure same environment uses are next
-    // to each other.
-    const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
-    for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) {
-      auto next = current;
-      for (++next; next != end; ++next) {
-        DCHECK(next->GetUser() != current->GetUser());
-      }
-    }
+    seen_users.insert(id);
   }
   return false;
 }
 
-void SsaBuilder::ReplaceUninitializedStringPhis() {
-  ScopedArenaHashSet<HInstruction*> seen_instructions(
-      local_allocator_->Adapter(kArenaAllocGraphBuilder));
-  ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
-
-  // Iterate over all inputs and uses of the phi, recursively, until all related instructions
-  // have been visited.
-  for (const auto& pair : uninitialized_string_phis_) {
-    HPhi* string_phi = pair.first;
-    HInvoke* invoke = pair.second;
-    worklist.push_back(string_phi);
-    HNewInstance* found_instance = nullptr;
-    do {
-      HInstruction* current = worklist.back();
-      worklist.pop_back();
-      if (seen_instructions.find(current) != seen_instructions.end()) {
-        continue;
-      }
-      seen_instructions.insert(current);
-      if (current->IsNewInstance()) {
-        // If it is the first time we see the allocation, replace its uses. We don't register
-        // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
-        // aliasing and environment uses that don't hold when the string escapes to phis.
-        // Note that this also means we will keep the (useless) allocation.
-        if (found_instance == nullptr) {
-          found_instance = current->AsNewInstance();
-        } else {
-          DCHECK(found_instance == current);
-        }
-      } else if (current->IsPhi()) {
-        // Push all inputs to the worklist. Those should be Phis or NewInstance.
-        for (HInstruction* input : current->GetInputs()) {
-          DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName();
-          worklist.push_back(input);
-        }
-      } else {
-        // The verifier prevents any other DEX uses of the uninitialized string.
-        DCHECK(current->IsEqual() || current->IsNotEqual());
-        continue;
-      }
-      current->ReplaceUsesDominatedBy(invoke, invoke);
-      current->ReplaceEnvUsesDominatedBy(invoke, invoke);
-      // Push all users to the worklist. Now that we have replaced
-      // the uses dominated by the invokes, the remaining users should only
-      // be Phi, or Equal/NotEqual.
-      for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
-        HInstruction* user = use.GetUser();
-        DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName();
-        worklist.push_back(user);
-      }
-    } while (!worklist.empty());
-    seen_instructions.clear();
-    DCHECK(found_instance != nullptr);
+bool SsaBuilder::ReplaceUninitializedStringPhis() {
+  for (HInvoke* invoke : uninitialized_string_phis_) {
+    HInstruction* str = invoke->InputAt(invoke->InputCount() - 1);
+    if (str->IsPhi()) {
+      // If after redundant phi and dead phi elimination, it's still a phi that feeds
+      // the invoke, then we must be compiling a method with irreducible loops. Just bail.
+      DCHECK(graph_->HasIrreducibleLoops());
+      return false;
+    }
+    DCHECK(str->IsNewInstance());
+    AddUninitializedString(str->AsNewInstance());
+    str->ReplaceUsesDominatedBy(invoke, invoke);
+    str->ReplaceEnvUsesDominatedBy(invoke, invoke);
+    invoke->RemoveInputAt(invoke->InputCount() - 1);
   }
+  return true;
 }
 
 void SsaBuilder::RemoveRedundantUninitializedStrings() {
@@ -508,8 +462,9 @@
     DCHECK(new_instance->IsStringAlloc());
 
     // Replace NewInstance of String with NullConstant if not used prior to
-    // calling StringFactory. In case of deoptimization, the interpreter is
-    // expected to skip null check on the `this` argument of the StringFactory call.
+    // calling StringFactory. We check for alias environments in case of deoptimization.
+    // The interpreter is expected to skip null check on the `this` argument of the
+    // StringFactory call.
     if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
       new_instance->ReplaceWith(graph_->GetNullConstant());
       new_instance->GetBlock()->RemoveInstruction(new_instance);
@@ -544,11 +499,6 @@
 GraphAnalysisResult SsaBuilder::BuildSsa() {
   DCHECK(!graph_->IsInSsaForm());
 
-  // Replace Phis that feed in a String.<init>, as well as their aliases, with
-  // the actual String allocation invocation. We do this first, as the phis stored in
-  // the data structure might get removed from the graph in later stages during `BuildSsa`.
-  ReplaceUninitializedStringPhis();
-
   // Propagate types of phis. At this point, phis are typed void in the general
   // case, or float/double/reference if we created an equivalent phi. So we need
   // to propagate the types across phis to give them a correct type. If a type
@@ -575,7 +525,7 @@
                            class_loader_,
                            dex_cache_,
                            handles_,
-                           /* is_first_run */ true).Run();
+                           /* is_first_run= */ true).Run();
 
   // HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
   // (int/float or long/double) and marked ArraySets with ambiguous input type.
@@ -607,6 +557,14 @@
   // input types.
   dead_phi_elimimation.EliminateDeadPhis();
 
+  // Replace Phis that feed in a String.<init> during instruction building. We
+  // run this after redundant and dead phi elimination to make sure the phi will
+  // have been replaced by the actual allocation. Only in the presence of an
+  // irreducible loop can a phi still be the input, in which case we bail.
+  if (!ReplaceUninitializedStringPhis()) {
+    return kAnalysisFailIrreducibleLoopAndStringInit;
+  }
+
   // HInstructionBuidler replaced uses of NewInstances of String with the
   // results of their corresponding StringFactory calls. Unless the String
   // objects are used before they are initialized, they can be replaced with
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 7655445..bb892c9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -97,8 +97,8 @@
     }
   }
 
-  void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
-    uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+  void AddUninitializedStringPhi(HInvoke* invoke) {
+    uninitialized_string_phis_.push_back(invoke);
   }
 
  private:
@@ -123,7 +123,8 @@
   HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
 
   void RemoveRedundantUninitializedStrings();
-  void ReplaceUninitializedStringPhis();
+  bool ReplaceUninitializedStringPhis();
+  bool HasAliasInEnvironments(HInstruction* instruction);
 
   HGraph* const graph_;
   Handle<mirror::ClassLoader> class_loader_;
@@ -137,7 +138,7 @@
   ScopedArenaVector<HArrayGet*> ambiguous_agets_;
   ScopedArenaVector<HArraySet*> ambiguous_asets_;
   ScopedArenaVector<HNewInstance*> uninitialized_strings_;
-  ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
+  ScopedArenaVector<HInvoke*> uninitialized_string_phis_;
 
   DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
 };
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6..7b2c3a9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@
       DCHECK(input->HasSsaIndex());
       // `input` generates a result used by `current`. Add use and update
       // the live-in set.
-      input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+      input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
       live_in->SetBit(input->GetSsaIndex());
     } else if (has_out_location) {
       // `input` generates a result but it is not used by `current`.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92d0b08..c883907 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1155,10 +1155,11 @@
  *
  * (a) Non-environment uses of an instruction always make
  *     the instruction live.
- * (b) Environment uses of an instruction whose type is
- *     object (that is, non-primitive), make the instruction live.
- *     This is due to having to keep alive objects that have
- *     finalizers deleting native objects.
+ * (b) Environment uses of an instruction whose type is object (that is, non-primitive), make the
+ *     instruction live, unless the class has an @DeadReferenceSafe annotation.
+ *     This avoids unexpected premature reference enqueuing or finalization, which could
+ *     result in premature deletion of native objects.  In the presence of @DeadReferenceSafe,
+ *     object references are treated like primitive types.
  * (c) When the graph has the debuggable property, environment uses
  *     of an instruction that has a primitive type make the instruction live.
  *     If the graph does not have the debuggable property, the environment
@@ -1287,6 +1288,7 @@
     // When compiling in OSR mode, all loops in the compiled method may be entered
     // from the interpreter via SuspendCheck; thus we need to preserve the environment.
     if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
+    if (graph->IsDeadReferenceSafe()) return false;
     return instruction->GetType() == DataType::Type::kReference;
   }
 
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b52553..352c44f 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
-  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
   block->AddInstruction(bounds_check);
   HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                     /* number_of_vregs */ 5,
-                                                                     /* method */ nullptr,
-                                                                     /* dex_pc */ 0u,
+                                                                     /* number_of_vregs= */ 5,
+                                                                     /* method= */ nullptr,
+                                                                     /* dex_pc= */ 0u,
                                                                      bounds_check);
   bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   bounds_check->SetRawEnvironment(bounds_check_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
 
   graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@
   HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
   block->AddInstruction(ae);
   HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
-      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
   block->AddInstruction(deoptimize);
   HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    deoptimize);
   deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   deoptimize->SetRawEnvironment(deoptimize_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
 
   graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43..3fcb72e 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@
 
   ArenaBitVector visited_phis_in_cycle(&allocator,
                                        graph_->GetCurrentInstructionId(),
-                                       /* expandable */ false,
+                                       /* expandable= */ false,
                                        kArenaAllocSsaPhiElimination);
   visited_phis_in_cycle.ClearAllBits();
   ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f211721..dbe9008 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@
   // Returns whether the loop can be peeled/unrolled.
   bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
 
-  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
-  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
   HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
 
  protected:
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index ebb631e..77f5d70 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -91,7 +91,7 @@
   ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
   // Check that mr == self.tls32_.is.gc_marking.
   ___ Cmp(mr, temp);
-  ___ B(eq, &mr_is_ok, /* far_target */ false);
+  ___ B(eq, &mr_is_ok, /* is_far_target= */ false);
   ___ Bkpt(code);
   ___ Bind(&mr_is_ok);
 }
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index c83fd44..d7ade05 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "assembler_arm64.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "heap_poisoning.h"
@@ -31,6 +32,37 @@
 #define ___   vixl_masm_.
 #endif
 
+// Sets vixl::CPUFeatures according to ART instruction set features.
+static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
+                                      const Arm64InstructionSetFeatures* art_features) {
+  // Retrieve already initialized default features of vixl.
+  vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();
+
+  DCHECK(features->Has(vixl::CPUFeatures::kFP));
+  DCHECK(features->Has(vixl::CPUFeatures::kNEON));
+  DCHECK(art_features != nullptr);
+  if (art_features->HasCRC()) {
+    features->Combine(vixl::CPUFeatures::kCRC32);
+  }
+  if (art_features->HasDotProd()) {
+    features->Combine(vixl::CPUFeatures::kDotProduct);
+  }
+  if (art_features->HasFP16()) {
+    features->Combine(vixl::CPUFeatures::kFPHalf);
+  }
+  if (art_features->HasLSE()) {
+    features->Combine(vixl::CPUFeatures::kAtomics);
+  }
+}
+
+Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
+                               const Arm64InstructionSetFeatures* art_features)
+    : Assembler(allocator) {
+  if (art_features != nullptr) {
+    SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
+  }
+}
+
 void Arm64Assembler::FinalizeCode() {
   ___ FinalizeCode();
 }
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 74537dd..fdecab8 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -37,6 +37,9 @@
 #pragma GCC diagnostic pop
 
 namespace art {
+
+class Arm64InstructionSetFeatures;
+
 namespace arm64 {
 
 #define MEM_OP(...)      vixl::aarch64::MemOperand(__VA_ARGS__)
@@ -63,7 +66,8 @@
 
 class Arm64Assembler final : public Assembler {
  public:
-  explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
+  explicit Arm64Assembler(
+      ArenaAllocator* allocator, const Arm64InstructionSetFeatures* features = nullptr);
 
   virtual ~Arm64Assembler() {}
 
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 096410d..0537225 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -295,7 +295,7 @@
   void ImplicitlyAdvancePC() final;
 
   explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
-      : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
+      : dwarf::DebugFrameOpCodeWriter<>(/* enabled= */ false),
         assembler_(buffer),
         delay_emitting_advance_pc_(false),
         delayed_advance_pcs_() {
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index 778a015..5fa0b3c 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -59,12 +59,12 @@
       disassembler_cmd_name_(disasm),
       disassembler_parameters_(disasm_params) {
     // Fake a runtime test for ScratchFile
-    CommonRuntimeTest::SetUpAndroidData(android_data_);
+    CommonRuntimeTest::SetUpAndroidDataDir(android_data_);
   }
 
   virtual ~AssemblerTestInfrastructure() {
     // We leave temporaries in case this failed so we can debug issues.
-    CommonRuntimeTest::TearDownAndroidData(android_data_, false);
+    CommonRuntimeTest::TearDownAndroidDataDir(android_data_, false);
     tmpnam_ = "";
   }
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 053e202..c9ece1d 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -125,7 +125,7 @@
   // Assemble the .S
   snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
   int cmd_result = system(cmd);
-  ASSERT_EQ(cmd_result, 0) << strerror(errno);
+  ASSERT_EQ(cmd_result, 0) << cmd << strerror(errno);
 
   // Disassemble.
   snprintf(cmd, sizeof(cmd), "%sobjdump -D -M force-thumb --section=.text %s.o  | grep '^  *[0-9a-f][0-9a-f]*:'",
@@ -239,7 +239,7 @@
   __ Load(scratch_register, FrameOffset(4092), 4);
   __ Load(scratch_register, FrameOffset(4096), 4);
   __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
-  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
+  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference= */ false);
 
   // Stores
   __ Store(FrameOffset(32), method_register, 4);
@@ -284,7 +284,7 @@
 
   __ DecreaseFrameSize(4096);
   __ DecreaseFrameSize(32);
-  __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+  __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
 
   EmitAndCheck(&assembler, "VixlJniHelpers");
 }
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 85e4326..0d279ed 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
   "  f0:	f1bc 0f00 	cmp.w	ip, #0\n",
   "  f4:	bf18      	it	ne\n",
   "  f6:	f20d 4c01 	addwne	ip, sp, #1025	; 0x401\n",
-  "  fa:	f8d9 c08c 	ldr.w	ip, [r9, #140]	; 0x8c\n",
+  "  fa:	f8d9 c094 	ldr.w	ip, [r9, #148]	; 0x94\n",
   "  fe:	f1bc 0f00 	cmp.w	ip, #0\n",
   " 102:	d171      	bne.n	1e8 <VixlJniHelpers+0x1e8>\n",
   " 104:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
@@ -153,7 +153,7 @@
   " 21c:	f8d9 8034 	ldr.w	r8, [r9, #52]	; 0x34\n",
   " 220:	4770      	bx	lr\n",
   " 222:	4660      	mov	r0, ip\n",
-  " 224:	f8d9 c2d4 	ldr.w	ip, [r9, #724]	; 0x2d4\n",
+  " 224:	f8d9 c2dc 	ldr.w	ip, [r9, #732]	; 0x2dc\n",
   " 228:	47e0      	blx	ip\n",
   nullptr
 };
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c0b6f98..a9d1a25 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -179,7 +179,7 @@
     return;
   }
 
-  typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+  using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
   const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
   const std::vector<uint8_t>& old_stream = data.first;
   const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -463,7 +463,7 @@
 }
 
 void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
-  Addiu(rt, rs, imm16, /* patcher_label */ nullptr);
+  Addiu(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
@@ -732,7 +732,7 @@
 }
 
 void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
-  Lw(rt, rs, imm16, /* patcher_label */ nullptr);
+  Lw(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
@@ -814,7 +814,7 @@
 }
 
 void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
-  Sw(rt, rs, imm16, /* patcher_label */ nullptr);
+  Sw(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
@@ -3610,7 +3610,7 @@
     label->LinkTo(branch_id);
   }
   // Reserve space for the branch.
-  while (length--) {
+  for (; length != 0u; --length) {
     Nop();
   }
 }
@@ -3755,7 +3755,7 @@
 
 void MipsAssembler::Buncond(MipsLabel* label, bool is_r6, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ false, is_bare);
+  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ false, is_bare);
   MoveInstructionToDelaySlot(branches_.back());
   FinalizeLabeledBranch(label);
 }
@@ -3778,7 +3778,7 @@
 
 void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ true, is_bare);
+  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ true, is_bare);
   MoveInstructionToDelaySlot(branches_.back());
   FinalizeLabeledBranch(label);
 }
@@ -4300,43 +4300,43 @@
 }
 
 void MipsAssembler::B(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+  Buncond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
 }
 
 void MipsAssembler::Bal(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+  Call(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
 }
 
 void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
 }
 
 void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
 }
 
 void MipsAssembler::Beqz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
 }
 
 void MipsAssembler::Bnez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
 }
 
 void MipsAssembler::Bltz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
 }
 
 void MipsAssembler::Bgez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
 }
 
 void MipsAssembler::Blez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
 }
 
 void MipsAssembler::Bgtz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
 }
 
 bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
@@ -4392,7 +4392,7 @@
     Bcond(label, IsR6(), is_bare, kCondLT, rs, rt);
   } else if (!Branch::IsNop(kCondLT, rs, rt)) {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
     Bnez(AT, label, is_bare);
   }
 }
@@ -4404,7 +4404,7 @@
     B(label, is_bare);
   } else {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
     Beqz(AT, label, is_bare);
   }
 }
@@ -4414,7 +4414,7 @@
     Bcond(label, IsR6(), is_bare, kCondLTU, rs, rt);
   } else if (!Branch::IsNop(kCondLTU, rs, rt)) {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
     Bnez(AT, label, is_bare);
   }
 }
@@ -4426,7 +4426,7 @@
     B(label, is_bare);
   } else {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
     Beqz(AT, label, is_bare);
   }
 }
@@ -4437,7 +4437,7 @@
 
 void MipsAssembler::Bc1f(int cc, MipsLabel* label, bool is_bare) {
   CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6 */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
 }
 
 void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
@@ -4446,71 +4446,71 @@
 
 void MipsAssembler::Bc1t(int cc, MipsLabel* label, bool is_bare) {
   CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6 */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
 }
 
 void MipsAssembler::Bc(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6 */ true, is_bare);
+  Buncond(label, /* is_r6= */ true, is_bare);
 }
 
 void MipsAssembler::Balc(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6 */ true, is_bare);
+  Call(label, /* is_r6= */ true, is_bare);
 }
 
 void MipsAssembler::Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
 }
 
 void MipsAssembler::Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
 }
 
 void MipsAssembler::Beqzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rt);
 }
 
 void MipsAssembler::Bnezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rt);
 }
 
 void MipsAssembler::Bltzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
 }
 
 void MipsAssembler::Bgezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
 }
 
 void MipsAssembler::Blezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
 }
 
 void MipsAssembler::Bgtzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
 }
 
 void MipsAssembler::Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
 }
 
 void MipsAssembler::Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
 }
 
 void MipsAssembler::Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
 }
 
 void MipsAssembler::Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
 }
 
 void MipsAssembler::Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
 }
 
 void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
 }
 
 void MipsAssembler::AdjustBaseAndOffset(Register& base,
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8a1e1df..69189a4 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -862,7 +862,7 @@
     // We permit `base` and `temp` to coincide (however, we check that neither is AT),
     // in which case the `base` register may be overwritten in the process.
     CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     uint32_t low = Low32Bits(value);
     uint32_t high = High32Bits(value);
     Register reg;
@@ -917,7 +917,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
     switch (type) {
       case kLoadSignedByte:
         Lb(reg, base, offset);
@@ -960,7 +960,7 @@
                        Register base,
                        int32_t offset,
                        ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
     Lwc1(reg, base, offset);
     null_checker();
   }
@@ -970,7 +970,7 @@
                        Register base,
                        int32_t offset,
                        ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
     if (IsAligned<kMipsDoublewordSize>(offset)) {
       Ldc1(reg, base, offset);
       null_checker();
@@ -1016,7 +1016,7 @@
     // Must not use AT as `reg`, so as not to overwrite the value being stored
     // with the adjusted `base`.
     CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     switch (type) {
       case kStoreByte:
         Sb(reg, base, offset);
@@ -1047,7 +1047,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
     Swc1(reg, base, offset);
     null_checker();
   }
@@ -1057,7 +1057,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
     if (IsAligned<kMipsDoublewordSize>(offset)) {
       Sdc1(reg, base, offset);
       null_checker();
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index f9919f5..98fc44b 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -38,12 +38,12 @@
                                                    uint32_t,
                                                    mips::VectorRegister> {
  public:
-  typedef AssemblerTest<mips::MipsAssembler,
-                        mips::MipsLabel,
-                        mips::Register,
-                        mips::FRegister,
-                        uint32_t,
-                        mips::VectorRegister> Base;
+  using Base = AssemblerTest<mips::MipsAssembler,
+                             mips::MipsLabel,
+                             mips::Register,
+                             mips::FRegister,
+                             uint32_t,
+                             mips::VectorRegister>;
 
   // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
   // and reimplement it without the verification against `assembly_string`. b/73903608
@@ -229,7 +229,7 @@
     STLDeleteElements(&vec_registers_);
   }
 
-  std::vector<mips::MipsLabel> GetAddresses() {
+  std::vector<mips::MipsLabel> GetAddresses() override {
     UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
     UNREACHABLE();
   }
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 1ec7a6a..4e27bbf 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -38,12 +38,12 @@
                                                    uint32_t,
                                                    mips::VectorRegister> {
  public:
-  typedef AssemblerTest<mips::MipsAssembler,
-                        mips::MipsLabel,
-                        mips::Register,
-                        mips::FRegister,
-                        uint32_t,
-                        mips::VectorRegister> Base;
+  using Base = AssemblerTest<mips::MipsAssembler,
+                             mips::MipsLabel,
+                             mips::Register,
+                             mips::FRegister,
+                             uint32_t,
+                             mips::VectorRegister>;
 
   // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
   // and reimplement it without the verification against `assembly_string`. b/73903608
@@ -242,7 +242,7 @@
     STLDeleteElements(&vec_registers_);
   }
 
-  std::vector<mips::MipsLabel> GetAddresses() {
+  std::vector<mips::MipsLabel> GetAddresses() override {
     UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
     UNREACHABLE();
   }
@@ -1078,11 +1078,11 @@
 //////////////
 
 TEST_F(AssemblerMIPS32r6Test, Bc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Balc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Beqc) {
@@ -1142,11 +1142,11 @@
 }
 
 TEST_F(AssemblerMIPS32r6Test, B) {
-  BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Bal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Beq) {
@@ -1198,123 +1198,123 @@
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBalc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot */ true, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot= */ true, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot */ true, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot= */ true, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, LongBeqc) {
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 9527fa6..c0894d3 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -37,11 +37,11 @@
                                                mips::FRegister,
                                                uint32_t> {
  public:
-  typedef AssemblerTest<mips::MipsAssembler,
-                        mips::MipsLabel,
-                        mips::Register,
-                        mips::FRegister,
-                        uint32_t> Base;
+  using Base = AssemblerTest<mips::MipsAssembler,
+                             mips::MipsLabel,
+                             mips::Register,
+                             mips::FRegister,
+                             uint32_t>;
 
   // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
   // and reimplement it without the verification against `assembly_string`. b/73903608
@@ -176,7 +176,7 @@
     STLDeleteElements(&fp_registers_);
   }
 
-  std::vector<mips::MipsLabel> GetAddresses() {
+  std::vector<mips::MipsLabel> GetAddresses() override {
     UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
     UNREACHABLE();
   }
@@ -2241,67 +2241,67 @@
 }
 
 TEST_F(AssemblerMIPSTest, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBc1f) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare */ true);
+  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBc1t) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare */ true);
+  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 5b1c5d9..70313ca 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -52,7 +52,7 @@
     return;
   }
 
-  typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+  using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
   const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
   const std::vector<uint8_t>& old_stream = data.first;
   const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -2455,7 +2455,7 @@
       condition_(kUncond) {
   InitializeType(
       (is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
-      /* is_r6 */ true);
+      /* is_r6= */ true);
 }
 
 Mips64Assembler::Branch::Branch(bool is_r6,
@@ -2516,7 +2516,7 @@
       rhs_reg_(ZERO),
       condition_(kUncond) {
   CHECK_NE(dest_reg, ZERO);
-  InitializeType(label_or_literal_type, /* is_r6 */ true);
+  InitializeType(label_or_literal_type, /* is_r6= */ true);
 }
 
 Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -2889,14 +2889,14 @@
     label->LinkTo(branch_id);
   }
   // Reserve space for the branch.
-  while (length--) {
+  for (; length != 0u; --length) {
     Nop();
   }
 }
 
 void Mips64Assembler::Buncond(Mips64Label* label, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call */ false, is_bare);
+  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ false, is_bare);
   FinalizeLabeledBranch(label);
 }
 
@@ -2917,7 +2917,7 @@
 
 void Mips64Assembler::Call(Mips64Label* label, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call */ true, is_bare);
+  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ true, is_bare);
   FinalizeLabeledBranch(label);
 }
 
@@ -3278,99 +3278,99 @@
 }
 
 void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
 }
 
 void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
 }
 
 void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
 }
 
 void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
 }
 
 void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
 }
 
 void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
 }
 
 void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
 }
 
 void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
 }
 
 void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
 }
 
 void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
 }
 
 void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rs);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rs);
 }
 
 void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rs);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rs);
 }
 
 void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
 }
 
 void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
 }
 
 void Mips64Assembler::Bltz(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondLTZ, rt);
 }
 
 void Mips64Assembler::Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondGTZ, rt);
 }
 
 void Mips64Assembler::Bgez(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondGEZ, rt);
 }
 
 void Mips64Assembler::Blez(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondLEZ, rt);
 }
 
 void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondEQ, rs, rt);
 }
 
 void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondNE, rs, rt);
 }
 
 void Mips64Assembler::Beqz(GpuRegister rs, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondEQZ, rs);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondEQZ, rs);
 }
 
 void Mips64Assembler::Bnez(GpuRegister rs, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondNEZ, rs);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondNEZ, rs);
 }
 
 void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ce447db..2f991e9 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -1058,7 +1058,7 @@
     // We permit `base` and `temp` to coincide (however, we check that neither is AT),
     // in which case the `base` register may be overwritten in the process.
     CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     GpuRegister reg;
     // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
     // to load and hold the value but we can use AT instead as AT hasn't been used yet.
@@ -1127,7 +1127,7 @@
                       GpuRegister base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
 
     switch (type) {
       case kLoadSignedByte:
@@ -1178,7 +1178,7 @@
                          ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
     int element_size_shift = -1;
     if (type != kLoadQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
     } else {
       AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
     }
@@ -1226,7 +1226,7 @@
     // Must not use AT as `reg`, so as not to overwrite the value being stored
     // with the adjusted `base`.
     CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
 
     switch (type) {
       case kStoreByte:
@@ -1267,7 +1267,7 @@
                         ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
     int element_size_shift = -1;
     if (type != kStoreQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     } else {
       AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
     }
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 4ceb356..499e8f4 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -41,12 +41,12 @@
                                                  uint32_t,
                                                  mips64::VectorRegister> {
  public:
-  typedef AssemblerTest<mips64::Mips64Assembler,
-                        mips64::Mips64Label,
-                        mips64::GpuRegister,
-                        mips64::FpuRegister,
-                        uint32_t,
-                        mips64::VectorRegister> Base;
+  using Base = AssemblerTest<mips64::Mips64Assembler,
+                             mips64::Mips64Label,
+                             mips64::GpuRegister,
+                             mips64::FpuRegister,
+                             uint32_t,
+                             mips64::VectorRegister>;
 
   // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
   // and reimplement it without the verification against `assembly_string`. b/73903608
@@ -240,7 +240,7 @@
     STLDeleteElements(&vec_registers_);
   }
 
-  std::vector<mips64::Mips64Label> GetAddresses() {
+  std::vector<mips64::Mips64Label> GetAddresses() override {
     UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
     UNREACHABLE();
   }
@@ -852,99 +852,99 @@
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc) {
-  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare */ true);
+  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBalc) {
-  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare */ true);
+  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBlez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, LongBeqc) {
@@ -1252,7 +1252,7 @@
   std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
   std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
   reg2_registers.erase(reg2_registers.begin());  // reg2 can't be ZERO, remove it.
-  std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+  std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits= */ 16, /* as_uint= */ true);
   WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
   std::ostringstream expected;
   for (mips64::GpuRegister* reg1 : reg1_registers) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 86f9010..4b073bd 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -59,6 +59,98 @@
   }
 }
 
+uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) {
+  uint8_t vex_zero = 0xC0;
+  if (!is_two_byte) {
+    vex_zero |= 0xC4;
+  } else {
+    vex_zero |= 0xC5;
+  }
+  return vex_zero;
+}
+
+uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm ) {
+  // VEX Byte 1
+  uint8_t vex_prefix = 0;
+  if (!r) {
+    vex_prefix |= 0x80;  // VEX.R
+  }
+  if (!x) {
+    vex_prefix |= 0x40;  // VEX.X
+  }
+  if (!b) {
+    vex_prefix |= 0x20;  // VEX.B
+  }
+
+  // VEX.mmmmm
+  switch (mmmmm) {
+  case 1:
+    // implied 0F leading opcode byte
+    vex_prefix |= 0x01;
+    break;
+  case 2:
+    // implied leading 0F 38 opcode byte
+    vex_prefix |= 0x02;
+    break;
+  case 3:
+    // implied leading 0F 3A opcode byte
+    vex_prefix |= 0x03;
+    break;
+  default:
+    LOG(FATAL) << "unknown opcode bytes";
+  }
+  return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) {
+  uint8_t vex_prefix = 0;
+  // VEX Byte 2
+  if (w) {
+    vex_prefix |= 0x80;
+  }
+  // VEX.vvvv
+  if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15-static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    Register vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+
+  // VEX.L
+  if (l == 256) {
+    vex_prefix |= 0x04;
+  }
+
+  // VEX.pp
+  switch (pp) {
+  case 0:
+    // SIMD Prefix - None
+    vex_prefix |= 0x00;
+    break;
+  case 1:
+    // SIMD Prefix - 66
+    vex_prefix |= 0x01;
+    break;
+  case 2:
+    // SIMD Prefix - F3
+    vex_prefix |= 0x02;
+    break;
+  case 3:
+    // SIMD Prefix - F2
+    vex_prefix |= 0x03;
+    break;
+  default:
+    LOG(FATAL) << "unknown SIMD Prefix";
+  }
+
+  return vex_prefix;
+}
+
 void X86Assembler::call(Register reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xFF);
@@ -179,6 +271,60 @@
   EmitOperand(src, dst);
 }
 
+void X86Assembler::blsi(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  /*b=*/ false,
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+                                  /*l=*/ 128,
+                                  X86ManagedRegister::FromCpuRegister(dst),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(3, src);
+}
+
+void X86Assembler::blsmsk(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  /*b=*/ false,
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+                                  /*l=*/ 128,
+                                  X86ManagedRegister::FromCpuRegister(dst),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(2, src);
+}
+
+void X86Assembler::blsr(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  /*b=*/ false,
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+                                  /*l=*/ 128,
+                                  X86ManagedRegister::FromCpuRegister(dst),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(1, src);
+}
+
 void X86Assembler::bswapl(Register dst) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
@@ -1267,6 +1413,25 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::andn(Register dst, Register src1, Register src2) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  /*b=*/ false,
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+                                  /*l=*/ 128,
+                                  X86ManagedRegister::FromCpuRegister(src1),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  // Opcode field
+  EmitUint8(0xF2);
+  EmitRegisterOperand(dst, src2);
+}
+
 
 void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1986,7 +2151,7 @@
 void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
-  EmitComplex(7, address, imm, /* is_16_op */ true);
+  EmitComplex(7, address, imm, /* is_16_op= */ true);
 }
 
 
@@ -2176,7 +2341,7 @@
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
   EmitUint8(0x66);
-  EmitComplex(0, address, imm, /* is_16_op */ true);
+  EmitComplex(0, address, imm, /* is_16_op= */ true);
 }
 
 
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 5ac9236..275e5c1 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -337,6 +337,10 @@
 
   void movntl(const Address& dst, Register src);
 
+  void blsi(Register dst, Register src);  // no addr variant (for now)
+  void blsmsk(Register dst, Register src);  // no addr variant (for now)
+  void blsr(Register dst, Register src);  // no addr variant (for now)
+
   void bswapl(Register dst);
 
   void bsfl(Register dst, Register src);
@@ -500,6 +504,7 @@
   void andps(XmmRegister dst, const Address& src);
   void pand(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
 
+  void andn(Register dst, Register src1, Register src2);  // no addr variant (for now)
   void andnpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void andnps(XmmRegister dst, XmmRegister src);
   void pandn(XmmRegister dst, XmmRegister src);
@@ -837,6 +842,11 @@
   void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
   void EmitGenericShift(int rm, const Operand& operand, Register shifter);
 
+  // Emit a 3 byte VEX Prefix
+  uint8_t EmitVexByteZero(bool is_two_byte);
+  uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+  uint8_t EmitVexByte2(bool w , int l , X86ManagedRegister operand, int pp);
+
   ConstantArea constant_area_;
 
   DISALLOW_COPY_AND_ASSIGN(X86Assembler);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index b03c40a..1d8bfe7 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -44,11 +44,11 @@
                                               x86::XmmRegister,
                                               x86::Immediate> {
  public:
-  typedef AssemblerTest<x86::X86Assembler,
-                        x86::Address,
-                        x86::Register,
-                        x86::XmmRegister,
-                        x86::Immediate> Base;
+  using Base = AssemblerTest<x86::X86Assembler,
+                             x86::Address,
+                             x86::Register,
+                             x86::XmmRegister,
+                             x86::Immediate>;
 
  protected:
   std::string GetArchitectureString() override {
@@ -349,6 +349,18 @@
   DriverStr(expected, "rep_movsw");
 }
 
+TEST_F(AssemblerX86Test, Blsmask) {
+  DriverStr(RepeatRR(&x86::X86Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86Test, Blsi) {
+  DriverStr(RepeatRR(&x86::X86Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86Test, Blsr) {
+  DriverStr(RepeatRR(&x86::X86Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
 TEST_F(AssemblerX86Test, Bsfl) {
   DriverStr(RepeatRR(&x86::X86Assembler::bsfl, "bsfl %{reg2}, %{reg1}"), "bsfl");
 }
@@ -657,6 +669,10 @@
   DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
 }
 
+TEST_F(AssemblerX86Test, Andn) {
+  DriverStr(RepeatRRR(&x86::X86Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
+
 TEST_F(AssemblerX86Test, AndnPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
 }
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index bd31561..c118bc6 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -64,6 +64,99 @@
   }
 }
 
+uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) {
+  uint8_t vex_zero = 0xC0;
+  if (!is_two_byte) {
+    vex_zero |= 0xC4;
+  } else {
+    vex_zero |= 0xC5;
+  }
+  return vex_zero;
+}
+
+uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
+  // VEX Byte 1
+  uint8_t vex_prefix = 0;
+  if (!r) {
+    vex_prefix |= 0x80;  // VEX.R
+  }
+  if (!x) {
+    vex_prefix |= 0x40;  // VEX.X
+  }
+  if (!b) {
+    vex_prefix |= 0x20;  // VEX.B
+  }
+
+  // VEX.mmmmm
+  switch (mmmmm) {
+  case 1:
+    // implied 0F leading opcode byte
+    vex_prefix |= 0x01;
+    break;
+  case 2:
+    // implied leading 0F 38 opcode byte
+    vex_prefix |= 0x02;
+    break;
+  case 3:
+    // implied leading 0F 3A opcode byte
+    vex_prefix |= 0x03;
+    break;
+  default:
+    LOG(FATAL) << "unknown opcode bytes";
+  }
+
+  return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) {
+  // VEX Byte 2
+  uint8_t vex_prefix = 0;
+  if (w) {
+    vex_prefix |= 0x80;
+  }
+  // VEX.vvvv
+  if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15-static_cast<int>(vvvv.AsFloatRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    CpuRegister vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+
+  // VEX.L
+  if (l == 256) {
+    vex_prefix |= 0x04;
+  }
+
+  // VEX.pp
+  switch (pp) {
+  case 0:
+    // SIMD Prefix - None
+    vex_prefix |= 0x00;
+    break;
+  case 1:
+    // SIMD Prefix - 66
+    vex_prefix |= 0x01;
+    break;
+  case 2:
+    // SIMD Prefix - F3
+    vex_prefix |= 0x02;
+    break;
+  case 3:
+    // SIMD Prefix - F2
+    vex_prefix |= 0x03;
+    break;
+  default:
+    LOG(FATAL) << "unknown SIMD Prefix";
+  }
+
+  return vex_prefix;
+}
+
 void X86_64Assembler::call(CpuRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(reg);
@@ -1483,6 +1576,25 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::andn(CpuRegister dst, CpuRegister src1, CpuRegister src2) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(dst.NeedsRex(),
+                                  /*x=*/ false,
+                                  src2.NeedsRex(),
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+                                  /*l=*/ 128,
+                                  X86_64ManagedRegister::FromCpuRegister(src1.AsRegister()),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  // Opcode field
+  EmitUint8(0xF2);
+  EmitRegisterOperand(dst.LowBits(), src2.LowBits());
+}
+
 void X86_64Assembler::andnpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -2279,7 +2391,7 @@
   CHECK(imm.is_int32());
   EmitOperandSizeOverride();
   EmitOptionalRex32(address);
-  EmitComplex(7, address, imm, /* is_16_op */ true);
+  EmitComplex(7, address, imm, /* is_16_op= */ true);
 }
 
 
@@ -2693,7 +2805,7 @@
   CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
   EmitUint8(0x66);
   EmitOptionalRex32(address);
-  EmitComplex(0, address, imm, /* is_16_op */ true);
+  EmitComplex(0, address, imm, /* is_16_op= */ true);
 }
 
 
@@ -3260,6 +3372,60 @@
   EmitUint8(0xC0 + dst.LowBits());
 }
 
+void X86_64Assembler::blsi(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  src.NeedsRex(),
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+                                  /*l=*/ 128,
+                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(3, src.LowBits());
+}
+
+void X86_64Assembler::blsmsk(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  src.NeedsRex(),
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+                                  /*l=*/ 128,
+                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(2, src.LowBits());
+}
+
+void X86_64Assembler::blsr(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+                                  /*x=*/ false,
+                                  src.NeedsRex(),
+                                  /*mmmmm=*/ 2);
+  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+                                  /*l=*/ 128,
+                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                  /*pp=*/ 0);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  EmitUint8(0xF3);
+  EmitRegisterOperand(1, src.LowBits());
+}
+
 void X86_64Assembler::bswapl(CpuRegister dst) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex(false, false, false, false, dst.NeedsRex());
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e696635..ff13ea3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -543,6 +543,7 @@
   void andps(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void pand(XmmRegister dst, XmmRegister src);
 
+  void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2);
   void andnpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void andnps(XmmRegister dst, XmmRegister src);
   void pandn(XmmRegister dst, XmmRegister src);
@@ -796,6 +797,10 @@
   void bsfq(CpuRegister dst, CpuRegister src);
   void bsfq(CpuRegister dst, const Address& src);
 
+  void blsi(CpuRegister dst, CpuRegister src);  // no addr variant (for now)
+  void blsmsk(CpuRegister dst, CpuRegister src);  // no addr variant (for now)
+  void blsr(CpuRegister dst, CpuRegister src);  // no addr variant (for now)
+
   void bsrl(CpuRegister dst, CpuRegister src);
   void bsrl(CpuRegister dst, const Address& src);
   void bsrq(CpuRegister dst, CpuRegister src);
@@ -951,6 +956,11 @@
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
 
+  // Emit a 3 byte VEX Prefix
+  uint8_t EmitVexByteZero(bool is_two_byte);
+  uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+  uint8_t EmitVexByte2(bool w , int l , X86_64ManagedRegister operand, int pp);
+
   ConstantArea constant_area_;
 
   DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index e1de1f1..461f028 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -137,11 +137,11 @@
                                                  x86_64::XmmRegister,
                                                  x86_64::Immediate> {
  public:
-  typedef AssemblerTest<x86_64::X86_64Assembler,
-                        x86_64::Address,
-                        x86_64::CpuRegister,
-                        x86_64::XmmRegister,
-                        x86_64::Immediate> Base;
+  using Base = AssemblerTest<x86_64::X86_64Assembler,
+                             x86_64::Address,
+                             x86_64::CpuRegister,
+                             x86_64::XmmRegister,
+                             x86_64::Immediate>;
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
@@ -297,7 +297,7 @@
     STLDeleteElements(&fp_registers_);
   }
 
-  std::vector<x86_64::Address> GetAddresses() {
+  std::vector<x86_64::Address> GetAddresses() override {
     return addresses_;
   }
 
@@ -1414,7 +1414,9 @@
 TEST_F(AssemblerX86_64Test, Pand) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
 }
-
+TEST_F(AssemblerX86_64Test, Andn) {
+  DriverStr(RepeatRRR(&x86_64::X86_64Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
 TEST_F(AssemblerX86_64Test, andnpd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
 }
@@ -1785,6 +1787,18 @@
   DriverFn(&ret_and_leave_fn, "retleave");
 }
 
+TEST_F(AssemblerX86_64Test, Blsmask) {
+  DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86_64Test, Blsi) {
+  DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86_64Test, Blsr) {
+  DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
 TEST_F(AssemblerX86_64Test, Bswapl) {
   DriverStr(Repeatr(&x86_64::X86_64Assembler::bswapl, "bswap %{reg}"), "bswapl");
 }
@@ -2080,7 +2094,7 @@
   ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
 
   size_t frame_size = 10 * kStackAlignment;
-  assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
+  assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend= */ true);
 
   // Construct assembly text counterpart.
   std::ostringstream str;
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 81932a9..092e931 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -49,10 +49,9 @@
 
   void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
   void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
-  bool IsRelocationPossible() override { return false; }
 
   verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
-  void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
+  void SetVerifierDeps(verifier::VerifierDeps* deps) override { deps_ = deps; }
 
  private:
   verifier::VerifierDeps* deps_;
@@ -60,7 +59,7 @@
 
 class VerifierDepsTest : public CommonCompilerTest {
  public:
-  void SetUpRuntimeOptions(RuntimeOptions* options) {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonCompilerTest::SetUpRuntimeOptions(options);
     callbacks_.reset(new VerifierDepsCompilerCallbacks());
   }
@@ -80,26 +79,26 @@
   }
 
   void SetupCompilerDriver() {
-    compiler_options_->boot_image_ = false;
+    compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
     compiler_driver_->InitializeThreadPools();
   }
 
-  void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+  void VerifyWithCompilerDriver(verifier::VerifierDeps* verifier_deps) {
     TimingLogger timings("Verify", false, false);
     // The compiler driver handles the verifier deps in the callbacks, so
     // remove what this class did for unit testing.
-    if (deps == nullptr) {
+    if (verifier_deps == nullptr) {
       // Create some verifier deps by default if they are not already specified.
-      deps = new verifier::VerifierDeps(dex_files_);
-      verifier_deps_.reset(deps);
+      verifier_deps = new verifier::VerifierDeps(dex_files_);
+      verifier_deps_.reset(verifier_deps);
     }
-    callbacks_->SetVerifierDeps(deps);
-    compiler_driver_->Verify(class_loader_, dex_files_, &timings);
+    callbacks_->SetVerifierDeps(verifier_deps);
+    compiler_driver_->Verify(class_loader_, dex_files_, &timings, verification_results_.get());
     callbacks_->SetVerifierDeps(nullptr);
     // Clear entries in the verification results to avoid hitting a DCHECK that
     // we always succeed inserting a new entry after verifying.
     AtomicDexRefMap<MethodReference, const VerifiedMethod*>* map =
-        &compiler_driver_->GetVerificationResults()->atomic_verified_methods_;
+        &verification_results_->atomic_verified_methods_;
     map->Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) {
       delete method;
     });
@@ -127,7 +126,7 @@
       class_linker_->RegisterDexFile(*dex_file, loader.Get());
     }
     for (const DexFile* dex_file : dex_files_) {
-      compiler_driver_->GetVerificationResults()->AddDexFile(dex_file);
+      verification_results_->AddDexFile(dex_file);
     }
     SetDexFilesForOatFile(dex_files_);
   }
@@ -148,7 +147,7 @@
         hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
     Handle<mirror::DexCache> dex_cache_handle(hs.NewHandle(klass_Main_->GetDexCache()));
 
-    const DexFile::ClassDef* class_def = klass_Main_->GetClassDef();
+    const dex::ClassDef* class_def = klass_Main_->GetClassDef();
     ClassAccessor accessor(*primary_dex_file_, *class_def);
 
     bool has_failures = true;
@@ -160,7 +159,7 @@
               method.GetIndex(),
               dex_cache_handle,
               class_loader_handle,
-              /* referrer */ nullptr,
+              /* referrer= */ nullptr,
               method.GetInvokeType(class_def->access_flags_));
       CHECK(resolved_method != nullptr);
       if (method_name == resolved_method->GetName()) {
@@ -174,12 +173,12 @@
                                 method.GetIndex(),
                                 resolved_method,
                                 method.GetAccessFlags(),
-                                true /* can_load_classes */,
-                                true /* allow_soft_failures */,
-                                true /* need_precise_constants */,
-                                false /* verify to dump */,
-                                true /* allow_thread_suspension */,
-                                0 /* api_level */);
+                                /* can_load_classes= */ true,
+                                /* allow_soft_failures= */ true,
+                                /* need_precise_constants= */ true,
+                                /* verify to dump */ false,
+                                /* allow_thread_suspension= */ true,
+                                /* api_level= */ 0);
         verifier.Verify();
         soa.Self()->SetVerifierDeps(nullptr);
         has_failures = verifier.HasFailures();
@@ -196,7 +195,7 @@
       LoadDexFile(soa, "VerifierDeps", multidex);
     }
     SetupCompilerDriver();
-    VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+    VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
   }
 
   bool TestAssignabilityRecording(const std::string& dst,
@@ -229,7 +228,7 @@
     for (const DexFile* dex_file : dex_files_) {
       const std::set<dex::TypeIndex>& unverified_classes = deps.GetUnverifiedClasses(*dex_file);
       for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
-        const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
         const char* descriptor = dex_file->GetClassDescriptor(class_def);
         cls.Assign(class_linker_->FindClass(soa.Self(), descriptor, class_loader_handle));
         if (cls == nullptr) {
@@ -251,7 +250,7 @@
   }
 
   bool HasUnverifiedClass(const std::string& cls, const DexFile& dex_file) {
-    const DexFile::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
+    const dex::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
     DCHECK(type_id != nullptr);
     dex::TypeIndex index = dex_file.GetIndexForTypeId(*type_id);
     for (const auto& dex_dep : verifier_deps_->dex_deps_) {
@@ -330,7 +329,7 @@
           continue;
         }
 
-        const DexFile::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
+        const dex::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
 
         std::string actual_klass = dex_dep.first->StringByTypeIdx(field_id.class_idx_);
         if (expected_klass != actual_klass) {
@@ -373,16 +372,16 @@
   bool HasMethod(const std::string& expected_klass,
                  const std::string& expected_name,
                  const std::string& expected_signature,
-                 bool expected_resolved,
+                 bool expect_resolved,
                  const std::string& expected_access_flags = "",
                  const std::string& expected_decl_klass = "") {
     for (auto& dex_dep : verifier_deps_->dex_deps_) {
       for (const VerifierDeps::MethodResolution& entry : dex_dep.second->methods_) {
-        if (expected_resolved != entry.IsResolved()) {
+        if (expect_resolved != entry.IsResolved()) {
           continue;
         }
 
-        const DexFile::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
+        const dex::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
 
         std::string actual_klass = dex_dep.first->StringByTypeIdx(method_id.class_idx_);
         if (expected_klass != actual_klass) {
@@ -399,7 +398,7 @@
           continue;
         }
 
-        if (expected_resolved) {
+        if (expect_resolved) {
           // Test access flags. Note that PrettyJavaAccessFlags always appends
           // a space after the modifiers. Add it to the expected access flags.
           std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags());
@@ -424,7 +423,7 @@
     return verifier_deps_->dex_deps_.size();
   }
 
-  size_t HasEachKindOfRecord() {
+  bool HasEachKindOfRecord() {
     bool has_strings = false;
     bool has_assignability = false;
     bool has_classes = false;
@@ -483,42 +482,42 @@
 }
 
 TEST_F(VerifierDepsTest, Assignable_BothInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
-                                         /* src */ "Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+                                         /* src= */ "Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot1) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/net/Socket;",
-                                         /* src */ "LMySSLSocket;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/net/Socket;",
+                                         /* src= */ "LMySSLSocket;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "Ljavax/net/ssl/SSLSocket;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
-                                         /* src */ "LMySimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+                                         /* src= */ "LMySimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/Collection;",
-                                         /* src */ "LMyThreadSet;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/Collection;",
+                                         /* src= */ "LMyThreadSet;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/Collection;", "Ljava/util/Set;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
-                                         /* src */ "[[Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[[Ljava/util/TimeZone;",
+                                         /* src= */ "[[Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   // If the component types of both arrays are resolved, we optimize the list of
   // dependencies by recording a dependency on the component types.
   ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[Ljava/util/SimpleTimeZone;", true));
@@ -527,34 +526,34 @@
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_BothInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot1) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LMySSLSocket;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LMySSLSocket;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljavax/net/ssl/SSLSocket;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LMySimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LMySimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_BothArrays) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[Ljava/lang/Exception;",
-                                         /* src */ "[Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[Ljava/lang/Exception;",
+                                         /* src= */ "[Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
@@ -590,7 +589,7 @@
   ASSERT_TRUE(HasMethod("Ljava/text/SimpleDateFormat;",
                         "setTimeZone",
                         "(Ljava/util/TimeZone;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/text/DateFormat;"));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
@@ -825,7 +824,7 @@
   ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -836,7 +835,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -846,7 +845,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -857,7 +856,7 @@
   ASSERT_TRUE(HasMethod("Ljava/util/Map$Entry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/util/Map$Entry;"));
 }
@@ -868,7 +867,7 @@
   ASSERT_TRUE(HasMethod("Ljava/util/AbstractMap$SimpleEntry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
@@ -877,7 +876,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
@@ -885,7 +884,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
@@ -894,7 +893,7 @@
   ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
                         "<init>",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/net/Socket;"));
 }
@@ -905,7 +904,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "checkOldImpl",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "private",
                         "Ljava/net/Socket;"));
 }
@@ -915,7 +914,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "checkOldImpl",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "private",
                         "Ljava/net/Socket;"));
 }
@@ -926,7 +925,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
@@ -934,7 +933,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
@@ -943,7 +942,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Throwable;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -956,7 +955,7 @@
   ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -968,7 +967,7 @@
   ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
 }
@@ -978,7 +977,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "size",
                         "()I",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -989,7 +988,7 @@
   ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
@@ -997,7 +996,7 @@
   ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
@@ -1006,7 +1005,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
@@ -1017,7 +1016,7 @@
   ASSERT_TRUE(HasMethod("LMyThread;",
                         "join",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Thread;"));
 }
@@ -1028,7 +1027,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Thread;"));
 }
@@ -1038,7 +1037,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "isEmpty",
                         "()Z",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -1049,12 +1048,12 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2"));
-  ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved */ false));
+  ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
@@ -1064,7 +1063,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
@@ -1075,7 +1074,7 @@
   ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
   ASSERT_TRUE(HasMethod("Ljava/lang/Integer;",
                         "intValue", "()I",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public", "Ljava/lang/Integer;"));
 }
 
@@ -1444,7 +1443,7 @@
         ScopedObjectAccess soa(Thread::Current());
         LoadDexFile(soa, "VerifierDeps", multi);
       }
-      VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+      VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
 
       std::vector<uint8_t> buffer;
       verifier_deps_->Encode(dex_files_, &buffer);
@@ -1494,22 +1493,22 @@
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_InterfaceWithClassInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LIface;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LIface;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LIface;", false));
 }
 
 TEST_F(VerifierDepsTest, Assignable_Arrays) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[LIface;",
-                                         /* src */ "[LMyClassExtendingInterface;",
-                                         /* is_strict */ false,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[LIface;",
+                                         /* src= */ "[LMyClassExtendingInterface;",
+                                         /* is_strict= */ false,
+                                         /* is_assignable= */ true));
   ASSERT_FALSE(HasAssignable(
-      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ true));
+      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ true));
   ASSERT_FALSE(HasAssignable(
-      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ false));
+      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ false));
 }
 
 }  // namespace verifier
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 88e69cd..20d41b4 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -64,19 +64,19 @@
 
     target: {
         android: {
-            // For atrace.
-            shared_libs: ["libcutils"],
+            static_libs: [
+                "libz",
+            ],
+        },
+        host: {
+            shared_libs: [
+                "libz",
+            ],
         },
     },
     generated_sources: ["art_dex2oat_operator_srcs"],
     shared_libs: [
         "libbase",
-        "liblz4",
-        "liblzma",
-    ],
-    include_dirs: [
-        "external/lz4/lib",
-        "external/zlib",
     ],
     export_include_dirs: ["."],
 
@@ -89,6 +89,14 @@
     },
 }
 
+cc_defaults {
+    name: "libart-dex2oat_static_base_defaults",
+    static_libs: [
+        "libbase",
+        "libz",
+    ],
+}
+
 gensrcs {
     name: "art_dex2oat_operator_srcs",
     cmd: "$(location generate_operator_out) art/dex2oat $(in) > $(out)",
@@ -99,7 +107,7 @@
     output_extension: "operator_out.cc",
 }
 
-art_cc_static_library {
+art_cc_library_static {
     name: "libart-dex2oat",
     defaults: ["libart-dex2oat-defaults"],
     shared_libs: [
@@ -110,7 +118,21 @@
     ],
 }
 
-art_cc_static_library {
+cc_defaults {
+    name: "libart-dex2oat_static_defaults",
+    defaults: [
+        "libart-dex2oat_static_base_defaults",
+        "libart_static_defaults",
+        "libprofile_static_defaults",
+    ],
+    static_libs: [
+        "libart-compiler",
+        "libart-dexlayout",
+        "libart-dex2oat",
+    ],
+}
+
+art_cc_library_static {
     name: "libartd-dex2oat",
     defaults: [
         "art_debug_defaults",
@@ -124,6 +146,20 @@
     ],
 }
 
+cc_defaults {
+    name: "libartd-dex2oat_static_defaults",
+    defaults: [
+        "libart-dex2oat_static_base_defaults",
+        "libartd_static_defaults",
+        "libprofiled_static_defaults",
+    ],
+    static_libs: [
+        "libartd-compiler",
+        "libartd-dexlayout",
+        "libartd-dex2oat",
+    ],
+}
+
 cc_library_headers {
     name: "dex2oat_headers",
     host_supported: true,
@@ -141,7 +177,7 @@
 
     target: {
         android: {
-            // Use the 32-bit version of dex2oat on devices
+            // Use the 32-bit version of dex2oat on devices.
             compile_multilib: "prefer32",
         },
     },
@@ -205,7 +241,6 @@
         "libdexfile",
         "libartbase",
         "libbase",
-        "liblz4",
         "libsigchain",
     ],
     static_libs: [
@@ -225,6 +260,14 @@
             lto: {
                  thin: true,
             },
+            static_libs: [
+                "libz",
+            ],
+        },
+        host: {
+            shared_libs: [
+                "libz",
+            ],
         },
     },
 }
@@ -243,19 +286,44 @@
         "libdexfiled",
         "libartbased",
         "libbase",
-        "liblz4",
         "libsigchain",
     ],
     static_libs: [
         "libartd-dex2oat",
     ],
+    target: {
+        android: {
+            static_libs: [
+                "libz",
+            ],
+        },
+        host: {
+            shared_libs: [
+                "libz",
+            ],
+            compile_multilib: "both",
+        },
+    },
+
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+
+    symlink_preferred_arch: true,
 }
 
 cc_defaults {
     name: "dex2oats-defaults",
     device_supported: false,
     static_executable: true,
-    defaults: ["dex2oat-defaults"],
+    defaults: [
+        "dex2oat-defaults",
+    ],
     target: {
         darwin: {
             enabled: false,
@@ -269,22 +337,24 @@
         // Try to get rid of it.
         "-z muldefs",
     ],
-    static_libs: art_static_dependencies,
+    static_libs: [
+        "libbase",
+        "libsigchain_dummy",
+        "libz",
+    ],
 }
 
 art_cc_binary {
     name: "dex2oats",
-    defaults: ["dex2oats-defaults"],
-    static_libs: [
-        "libart-dex2oat",
-        "libart-compiler",
-        "libart-dexlayout",
-        "libart",
-        "libartbase",
-        "libdexfile",
-        "libprofile",
-        "libvixl-arm",
-        "libvixl-arm64",
+    defaults: [
+        "dex2oats-defaults",
+        "libart_static_defaults",
+        "libart-compiler_static_defaults",
+        "libart-dexlayout_static_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+        "libprofile_static_defaults",
+        "libart-dex2oat_static_defaults",
     ],
 }
 
@@ -293,6 +363,13 @@
     defaults: [
         "art_debug_defaults",
         "dex2oats-defaults",
+        "libartd_static_defaults",
+        "libartd-compiler_static_defaults",
+        "libartd-dexlayout_static_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+        "libprofiled_static_defaults",
+        "libartd-dex2oat_static_defaults",
     ],
     target: {
         linux_glibc_x86_64: {
@@ -301,17 +378,6 @@
     },
     // b/79417743, oatdump 32-bit tests failed with clang lld
     use_clang_lld: false,
-    static_libs: [
-        "libartd-dex2oat",
-        "libartd-compiler",
-        "libartd-dexlayout",
-        "libartd",
-        "libartbased",
-        "libprofiled",
-        "libdexfiled",
-        "libvixld-arm",
-        "libvixld-arm64",
-    ],
 }
 
 art_cc_test {
@@ -369,11 +435,11 @@
         "external/zlib",
     ],
     shared_libs: [
-        "libprofiled",
+        "libartbased",
         "libartd-compiler",
         "libartd-dexlayout",
         "libbase",
-        "liblz4",
+        "libprofiled",
         "libsigchain",
         "libziparchive",
     ],
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b4aa327..ad1dda4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -103,6 +103,7 @@
 
 using android::base::StringAppendV;
 using android::base::StringPrintf;
+using gc::space::ImageSpace;
 
 static constexpr size_t kDefaultMinDexFilesForSwap = 2;
 static constexpr size_t kDefaultMinDexFileCumulativeSizeForSwap = 20 * MB;
@@ -115,6 +116,7 @@
 
 static std::string CommandLine() {
   std::vector<std::string> command;
+  command.reserve(original_argc);
   for (int i = 0; i < original_argc; ++i) {
     command.push_back(original_argv[i]);
   }
@@ -186,7 +188,8 @@
 
   // Construct the final output.
   if (command.size() <= 1U) {
-    // It seems only "/system/bin/dex2oat" is left, or not even that. Use a pretty line.
+    // It seems only "/apex/com.android.runtime/bin/dex2oat" is left, or not
+    // even that. Use a pretty line.
     return "Starting dex2oat.";
   }
   return android::base::Join(command, ' ');
@@ -293,12 +296,13 @@
   UsageError("      Default: arm");
   UsageError("");
   UsageError("  --instruction-set-features=...,: Specify instruction set features");
+  UsageError("      On target the value 'runtime' can be used to detect features at run time.");
+  UsageError("      If target does not support run-time detection the value 'runtime'");
+  UsageError("      has the same effect as the value 'default'.");
+  UsageError("      Note: the value 'runtime' has no effect if it is used on host.");
   UsageError("      Example: --instruction-set-features=div");
   UsageError("      Default: default");
   UsageError("");
-  UsageError("  --compile-pic: Force indirect use of code, methods, and classes");
-  UsageError("      Default: disabled for apps (ignored for boot image which is always PIC)");
-  UsageError("");
   UsageError("  --compiler-backend=(Quick|Optimizing): select compiler backend");
   UsageError("      set.");
   UsageError("      Example: --compiler-backend=Optimizing");
@@ -382,6 +386,9 @@
   UsageError("  --avoid-storing-invocation: Avoid storing the invocation args in the key value");
   UsageError("      store. Used to test determinism with different args.");
   UsageError("");
+  UsageError("  --write-invocation-to=<file>: Write the invocation commandline to the given file");
+  UsageError("      for later use. Used to test determinism with different host architectures.");
+  UsageError("");
   UsageError("  --runtime-arg <argument>: used to specify various arguments for the runtime,");
   UsageError("      such as initial heap size, maximum heap size, and verbose output.");
   UsageError("      Use a separate --runtime-arg switch for each argument.");
@@ -419,8 +426,7 @@
   UsageError("  --app-image-file=<file-name>: specify a file name for app image.");
   UsageError("      Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art");
   UsageError("");
-  UsageError("  --multi-image: specify that separate oat and image files be generated for each "
-             "input dex file.");
+  UsageError("  --multi-image: obsolete, ignored");
   UsageError("");
   UsageError("  --force-determinism: force the compiler to emit a deterministic output.");
   UsageError("");
@@ -480,9 +486,13 @@
   UsageError("  --compilation-reason=<string>: optional metadata specifying the reason for");
   UsageError("      compiling the apk. If specified, the string will be embedded verbatim in");
   UsageError("      the key value store of the oat file.");
-  UsageError("");
   UsageError("      Example: --compilation-reason=install");
   UsageError("");
+  UsageError("  --resolve-startup-const-strings=true|false: If true, the compiler eagerly");
+  UsageError("      resolves strings referenced from const-string of startup methods.");
+  UsageError("");
+  UsageError("  --max-image-block-size=<size>: Maximum solid block size for compressed images.");
+  UsageError("");
   std::cerr << "See log for usage error information\n";
   exit(EXIT_FAILURE);
 }
@@ -590,13 +600,14 @@
     const char* reason = "dex2oat watch dog thread waiting";
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
     while (!shutting_down_) {
-      int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
-      if (rc == ETIMEDOUT) {
+      int rc = pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts);
+      if (rc == EINTR) {
+        continue;
+      } else if (rc == ETIMEDOUT) {
         Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
                            timeout_in_milliseconds_/1000));
       } else if (rc != 0) {
-        std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
-                                         strerror(errno)));
+        std::string message(StringPrintf("pthread_cond_timedwait failed: %s", strerror(rc)));
         Fatal(message.c_str());
       }
     }
@@ -618,13 +629,13 @@
   explicit Dex2Oat(TimingLogger* timings) :
       compiler_kind_(Compiler::kOptimizing),
       // Take the default set of instruction features from the build.
-      image_file_location_oat_checksum_(0),
       key_value_store_(nullptr),
       verification_results_(nullptr),
       runtime_(nullptr),
       thread_count_(sysconf(_SC_NPROCESSORS_CONF)),
       start_ns_(NanoTime()),
       start_cputime_ns_(ProcessCpuNanoTime()),
+      strip_(false),
       oat_fd_(-1),
       input_vdex_fd_(-1),
       output_vdex_fd_(-1),
@@ -637,7 +648,6 @@
       image_storage_mode_(ImageHeader::kStorageModeUncompressed),
       passes_to_run_filename_(nullptr),
       dirty_image_objects_filename_(nullptr),
-      multi_image_(false),
       is_host_(false),
       elf_writers_(),
       oat_writers_(),
@@ -662,26 +672,26 @@
     if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
       // We want to just exit on non-debug builds, not bringing the runtime down
       // in an orderly fashion. So release the following fields.
-      driver_.release();
-      image_writer_.release();
+      driver_.release();                // NOLINT
+      image_writer_.release();          // NOLINT
       for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
-        dex_file.release();
+        dex_file.release();             // NOLINT
       }
       new std::vector<MemMap>(std::move(opened_dex_files_maps_));  // Leak MemMaps.
       for (std::unique_ptr<File>& vdex_file : vdex_files_) {
-        vdex_file.release();
+        vdex_file.release();            // NOLINT
       }
       for (std::unique_ptr<File>& oat_file : oat_files_) {
-        oat_file.release();
+        oat_file.release();             // NOLINT
       }
-      runtime_.release();
-      verification_results_.release();
-      key_value_store_.release();
+      runtime_.release();               // NOLINT
+      verification_results_.release();  // NOLINT
+      key_value_store_.release();       // NOLINT
     }
   }
 
   struct ParserOptions {
-    std::vector<const char*> oat_symbols;
+    std::vector<std::string> oat_symbols;
     std::string boot_image_filename;
     int64_t watch_dog_timeout_in_ms = -1;
     bool watch_dog_enabled = true;
@@ -727,19 +737,16 @@
   }
 
   void ProcessOptions(ParserOptions* parser_options) {
-    compiler_options_->boot_image_ = !image_filenames_.empty();
-    if (compiler_options_->boot_image_) {
-      compiler_options_->compile_pic_ = true;
+    compiler_options_->compile_pic_ = true;  // All AOT compilation is PIC.
+    DCHECK(compiler_options_->image_type_ == CompilerOptions::ImageType::kNone);
+    if (!image_filenames_.empty()) {
+      compiler_options_->image_type_ = CompilerOptions::ImageType::kBootImage;
     }
-    compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
-
-    if (IsBootImage() && image_filenames_.size() == 1) {
-      const std::string& boot_image_filename = image_filenames_[0];
-      compiler_options_->core_image_ = CompilerDriver::IsCoreImageFilename(boot_image_filename);
-    }
-
-    if (IsAppImage() && IsBootImage()) {
-      Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
+    if (app_image_fd_ != -1 || !app_image_file_name_.empty()) {
+      if (compiler_options_->IsBootImage()) {
+        Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
+      }
+      compiler_options_->image_type_ = CompilerOptions::ImageType::kAppImage;
     }
 
     if (oat_filenames_.empty() && oat_fd_ == -1) {
@@ -834,9 +841,7 @@
     }
 
     if (dex_locations_.empty()) {
-      for (const char* dex_file_name : dex_filenames_) {
-        dex_locations_.push_back(dex_file_name);
-      }
+      dex_locations_ = dex_filenames_;
     } else if (dex_locations_.size() != dex_filenames_.size()) {
       Usage("--dex-location arguments do not match --dex-file arguments");
     }
@@ -874,9 +879,9 @@
       oat_unstripped_ = std::move(parser_options->oat_symbols);
     }
 
-    // If no instruction set feature was given, use the default one for the target
-    // instruction set.
-    if (compiler_options_->instruction_set_features_.get() == nullptr) {
+    if (compiler_options_->instruction_set_features_ == nullptr) {
+      // '--instruction-set-features/--instruction-set-variant' were not used.
+      // Use features for the 'default' variant.
       compiler_options_->instruction_set_features_ = InstructionSetFeatures::FromVariant(
           compiler_options_->instruction_set_, "default", &parser_options->error_msg);
       if (compiler_options_->instruction_set_features_ == nullptr) {
@@ -889,9 +894,9 @@
       std::unique_ptr<const InstructionSetFeatures> runtime_features(
           InstructionSetFeatures::FromCppDefines());
       if (!compiler_options_->GetInstructionSetFeatures()->Equals(runtime_features.get())) {
-        LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
+        LOG(WARNING) << "Mismatch between dex2oat instruction set features to use ("
             << *compiler_options_->GetInstructionSetFeatures()
-            << ") and those of dex2oat executable (" << *runtime_features
+            << ") and those from CPP defines (" << *runtime_features
             << ") for the command line:\n" << CommandLine();
       }
     }
@@ -919,20 +924,6 @@
         break;
     }
 
-    if (!IsBootImage() && multi_image_) {
-      Usage("--multi-image can only be used when creating boot images");
-    }
-    if (IsBootImage() && multi_image_ && image_filenames_.size() > 1) {
-      Usage("--multi-image cannot be used with multiple image names");
-    }
-
-    // For now, if we're on the host and compile the boot image, *always* use multiple image files.
-    if (!kIsTargetBuild && IsBootImage()) {
-      if (image_filenames_.size() == 1) {
-        multi_image_ = true;
-      }
-    }
-
     // Done with usage checks, enable watchdog if requested
     if (parser_options->watch_dog_enabled) {
       int64_t timeout = parser_options->watch_dog_timeout_in_ms > 0
@@ -966,6 +957,9 @@
       }
     }
     compiler_options_->passes_to_run_ = passes_to_run_.get();
+    compiler_options_->compiling_with_core_image_ =
+        !boot_image_filename_.empty() &&
+        CompilerDriver::IsCoreImageFilename(boot_image_filename_);
   }
 
   static bool SupportsDeterministicCompilation() {
@@ -975,122 +969,27 @@
   }
 
   void ExpandOatAndImageFilenames() {
-    std::string base_oat = oat_filenames_[0];
-    size_t last_oat_slash = base_oat.rfind('/');
-    if (last_oat_slash == std::string::npos) {
-      Usage("--multi-image used with unusable oat filename %s", base_oat.c_str());
+    if (image_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image filename %s", image_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_oat.find('@', last_oat_slash) != std::string::npos) {
-      last_oat_slash = base_oat.rfind('@');
-    }
-    base_oat = base_oat.substr(0, last_oat_slash + 1);
+    image_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, image_filenames_[0]);
 
-    std::string base_img = image_filenames_[0];
-    size_t last_img_slash = base_img.rfind('/');
-    if (last_img_slash == std::string::npos) {
-      Usage("--multi-image used with unusable image filename %s", base_img.c_str());
+    if (oat_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image oat filename %s", oat_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_img.find('@', last_img_slash) != std::string::npos) {
-      last_img_slash = base_img.rfind('@');
-    }
+    oat_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_filenames_[0]);
 
-    // Get the prefix, which is the primary image name (without path components). Strip the
-    // extension.
-    std::string prefix = base_img.substr(last_img_slash + 1);
-    if (prefix.rfind('.') != std::string::npos) {
-      prefix = prefix.substr(0, prefix.rfind('.'));
-    }
-    if (!prefix.empty()) {
-      prefix = prefix + "-";
-    }
-
-    base_img = base_img.substr(0, last_img_slash + 1);
-
-    // Note: we have some special case here for our testing. We have to inject the differentiating
-    //       parts for the different core images.
-    std::string infix;  // Empty infix by default.
-    {
-      // Check the first name.
-      std::string dex_file = oat_filenames_[0];
-      size_t last_dex_slash = dex_file.rfind('/');
-      if (last_dex_slash != std::string::npos) {
-        dex_file = dex_file.substr(last_dex_slash + 1);
-      }
-      size_t last_dex_dot = dex_file.rfind('.');
-      if (last_dex_dot != std::string::npos) {
-        dex_file = dex_file.substr(0, last_dex_dot);
-      }
-      if (android::base::StartsWith(dex_file, "core-")) {
-        infix = dex_file.substr(strlen("core"));
-      }
-    }
-
-    std::string base_symbol_oat;
     if (!oat_unstripped_.empty()) {
-      base_symbol_oat = oat_unstripped_[0];
-      size_t last_symbol_oat_slash = base_symbol_oat.rfind('/');
-      if (last_symbol_oat_slash == std::string::npos) {
-        Usage("--multi-image used with unusable symbol filename %s", base_symbol_oat.c_str());
+      if (oat_unstripped_[0].rfind('/') == std::string::npos) {
+        Usage("Unusable boot image symbol filename %s", oat_unstripped_[0].c_str());
       }
-      base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
+      oat_unstripped_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_unstripped_[0]);
     }
-
-    // Now create the other names. Use a counted loop to skip the first one.
-    for (size_t i = 1; i < dex_locations_.size(); ++i) {
-      // TODO: Make everything properly std::string.
-      std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art");
-      char_backing_storage_.push_front(base_img + image_name);
-      image_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
-      char_backing_storage_.push_front(base_oat + oat_name);
-      oat_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      if (!base_symbol_oat.empty()) {
-        char_backing_storage_.push_front(base_symbol_oat + oat_name);
-        oat_unstripped_.push_back(char_backing_storage_.front().c_str());
-      }
-    }
-  }
-
-  // Modify the input string in the following way:
-  //   0) Assume input is /a/b/c.d
-  //   1) Strip the path  -> c.d
-  //   2) Inject prefix p -> pc.d
-  //   3) Inject infix i  -> pci.d
-  //   4) Replace suffix with s if it's "jar"  -> d == "jar" -> pci.s
-  static std::string CreateMultiImageName(std::string in,
-                                          const std::string& prefix,
-                                          const std::string& infix,
-                                          const char* replace_suffix) {
-    size_t last_dex_slash = in.rfind('/');
-    if (last_dex_slash != std::string::npos) {
-      in = in.substr(last_dex_slash + 1);
-    }
-    if (!prefix.empty()) {
-      in = prefix + in;
-    }
-    if (!infix.empty()) {
-      // Inject infix.
-      size_t last_dot = in.rfind('.');
-      if (last_dot != std::string::npos) {
-        in.insert(last_dot, infix);
-      }
-    }
-    if (android::base::EndsWith(in, ".jar")) {
-      in = in.substr(0, in.length() - strlen(".jar")) +
-          (replace_suffix != nullptr ? replace_suffix : "");
-    }
-    return in;
   }
 
   void InsertCompileOptions(int argc, char** argv) {
-    std::ostringstream oss;
     if (!avoid_storing_invocation_) {
+      std::ostringstream oss;
       for (int i = 0; i < argc; ++i) {
         if (i > 0) {
           oss << ' ';
@@ -1098,10 +997,7 @@
         oss << argv[i];
       }
       key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
-      oss.str("");  // Reset.
     }
-    oss << kRuntimeISA;
-    key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
     key_value_store_->Put(
         OatHeader::kDebuggableKey,
         compiler_options_->debuggable_ ? OatHeader::kTrueValue : OatHeader::kFalseValue);
@@ -1112,6 +1008,21 @@
         CompilerFilter::NameOfFilter(compiler_options_->GetCompilerFilter()));
     key_value_store_->Put(OatHeader::kConcurrentCopying,
                           kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+    if (invocation_file_.get() != -1) {
+      std::ostringstream oss;
+      for (int i = 0; i < argc; ++i) {
+        if (i > 0) {
+          oss << std::endl;
+        }
+        oss << argv[i];
+      }
+      std::string invocation(oss.str());
+      if (TEMP_FAILURE_RETRY(write(invocation_file_.get(),
+                                   invocation.c_str(),
+                                   invocation.size())) == -1) {
+        Usage("Unable to write invocation file");
+      }
+    }
   }
 
   // This simple forward is here so the string specializations below don't look out of place.
@@ -1227,7 +1138,16 @@
 
     AssignTrueIfExists(args, M::Host, &is_host_);
     AssignTrueIfExists(args, M::AvoidStoringInvocation, &avoid_storing_invocation_);
-    AssignTrueIfExists(args, M::MultiImage, &multi_image_);
+    if (args.Exists(M::InvocationFile)) {
+      invocation_file_.reset(open(args.Get(M::InvocationFile)->c_str(),
+                                  O_CREAT|O_WRONLY|O_TRUNC|O_CLOEXEC,
+                                  S_IRUSR|S_IWUSR));
+      if (invocation_file_.get() == -1) {
+        int err = errno;
+        Usage("Unable to open invocation file '%s' for writing due to %s.",
+              args.Get(M::InvocationFile)->c_str(), strerror(err));
+      }
+    }
     AssignIfExists(args, M::CopyDexFiles, &copy_dex_files_);
 
     if (args.Exists(M::ForceDeterminism)) {
@@ -1291,15 +1211,15 @@
     PruneNonExistentDexFiles();
 
     // Expand oat and image filenames for multi image.
-    if (IsBootImage() && multi_image_) {
+    if (IsBootImage() && image_filenames_.size() == 1) {
       ExpandOatAndImageFilenames();
     }
 
     // OAT and VDEX file handling
     if (oat_fd_ == -1) {
       DCHECK(!oat_filenames_.empty());
-      for (const char* oat_filename : oat_filenames_) {
-        std::unique_ptr<File> oat_file(OS::CreateEmptyFile(oat_filename));
+      for (const std::string& oat_filename : oat_filenames_) {
+        std::unique_ptr<File> oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
         if (oat_file == nullptr) {
           PLOG(ERROR) << "Failed to create oat file: " << oat_filename;
           return false;
@@ -1397,7 +1317,7 @@
       }
       vdex_files_.push_back(std::move(vdex_file));
 
-      oat_filenames_.push_back(oat_location_.c_str());
+      oat_filenames_.push_back(oat_location_);
     }
 
     // If we're updating in place a vdex file, be defensive and put an invalid vdex magic in case
@@ -1445,11 +1365,13 @@
         MemMap input_file = zip_entry->MapDirectlyOrExtract(
             VdexFile::kVdexNameInDmFile,
             kDexMetadata,
-            &error_msg);
+            &error_msg,
+            alignof(VdexFile));
         if (!input_file.IsValid()) {
           LOG(WARNING) << "Could not open vdex file in DexMetadata archive: " << error_msg;
         } else {
           input_vdex_file_ = std::make_unique<VdexFile>(std::move(input_file));
+          VLOG(verifier) << "Doing fast verification with vdex from DexMetadata archive";
         }
       }
     }
@@ -1537,38 +1459,30 @@
       key_value_store_->Put(OatHeader::kCompilationReasonKey, compilation_reason_);
     }
 
-    if (IsBootImage() && image_filenames_.size() > 1) {
+    if (IsBootImage()) {
       // If we're compiling the boot image, store the boot classpath into the Key-Value store.
-      // We need this for the multi-image case.
-      key_value_store_->Put(OatHeader::kBootClassPathKey,
-                            gc::space::ImageSpace::GetMultiImageBootClassPath(dex_locations_,
-                                                                              oat_filenames_,
-                                                                              image_filenames_));
+      // We use this when loading the boot image.
+      key_value_store_->Put(OatHeader::kBootClassPathKey, android::base::Join(dex_locations_, ':'));
     }
 
     if (!IsBootImage()) {
       // When compiling an app, create the runtime early to retrieve
-      // the image location key needed for the oat header.
+      // the boot image checksums needed for the oat header.
       if (!CreateRuntime(std::move(runtime_options))) {
         return dex2oat::ReturnCode::kCreateRuntime;
       }
 
       if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
-        std::vector<gc::space::ImageSpace*> image_spaces =
-            Runtime::Current()->GetHeap()->GetBootImageSpaces();
-        image_file_location_oat_checksum_ = image_spaces[0]->GetImageHeader().GetOatChecksum();
-        // Store the boot image filename(s).
-        std::vector<std::string> image_filenames;
-        for (const gc::space::ImageSpace* image_space : image_spaces) {
-          image_filenames.push_back(image_space->GetImageFilename());
-        }
-        std::string image_file_location = android::base::Join(image_filenames, ':');
-        if (!image_file_location.empty()) {
-          key_value_store_->Put(OatHeader::kImageLocationKey, image_file_location);
-        }
-      } else {
-        image_file_location_oat_checksum_ = 0u;
+        Runtime* runtime = Runtime::Current();
+        key_value_store_->Put(OatHeader::kBootClassPathKey,
+                              android::base::Join(runtime->GetBootClassPathLocations(), ':'));
+        std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+        const std::vector<const DexFile*>& bcp_dex_files =
+            runtime->GetClassLinker()->GetBootClassPath();
+        key_value_store_->Put(
+            OatHeader::kBootClassPathChecksumsKey,
+            gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
       }
 
       // Open dex files for class path.
@@ -1624,7 +1538,7 @@
         if (!oat_writers_[i]->WriteAndOpenDexFiles(
             vdex_files_[i].get(),
             rodata_.back(),
-            key_value_store_.get(),
+            (i == 0u) ? key_value_store_.get() : nullptr,
             verify,
             update_input_vdex_,
             copy_dex_files_,
@@ -1653,7 +1567,7 @@
     // If we need to downgrade the compiler-filter for size reasons.
     if (!IsBootImage() && IsVeryLarge(dex_files)) {
       // Disable app image to make sure dex2oat unloading is enabled.
-      compiler_options_->DisableAppImage();
+      compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
 
       // If we need to downgrade the compiler-filter for size reasons, do that early before we read
       // it below for creating verification callbacks.
@@ -1837,14 +1751,12 @@
         compiler_options_->no_inline_from_.swap(no_inline_from_dex_files);
       }
     }
+    compiler_options_->profile_compilation_info_ = profile_compilation_info_.get();
 
     driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                     verification_results_.get(),
                                      compiler_kind_,
-                                     &compiler_options_->image_classes_,
                                      thread_count_,
-                                     swap_fd_,
-                                     profile_compilation_info_.get()));
+                                     swap_fd_));
     if (!IsBootImage()) {
       driver_->SetClasspathDexFiles(class_loader_context_->FlattenOpenedDexFiles());
     }
@@ -1914,7 +1826,16 @@
                    << soa.Self()->GetException()->Dump();
       }
     }
+    driver_->InitializeThreadPools();
+    driver_->PreCompile(class_loader,
+                        dex_files,
+                        timings_,
+                        &compiler_options_->image_classes_,
+                        verification_results_.get());
+    callbacks_->SetVerificationResults(nullptr);  // Should not be needed anymore.
+    compiler_options_->verification_results_ = verification_results_.get();
     driver_->CompileAll(class_loader, dex_files, timings_);
+    driver_->FreeThreadPools();
     return class_loader;
   }
 
@@ -1983,7 +1904,7 @@
   // ImageWriter, if necessary.
   // Note: Flushing (and closing) the file is the caller's responsibility, except for the failure
   //       case (when the file will be explicitly erased).
-  bool WriteOutputFiles() {
+  bool WriteOutputFiles(jobject class_loader) {
     TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
 
     // Sync the data to the file, in case we did dex2dex transformations.
@@ -1997,7 +1918,7 @@
     if (IsImage()) {
       if (IsAppImage() && image_base_ == 0) {
         gc::Heap* const heap = Runtime::Current()->GetHeap();
-        for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
+        for (ImageSpace* image_space : heap->GetBootImageSpaces()) {
           image_base_ = std::max(image_base_, RoundUp(
               reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
               kPageSize));
@@ -2015,11 +1936,10 @@
 
       image_writer_.reset(new linker::ImageWriter(*compiler_options_,
                                                   image_base_,
-                                                  compiler_options_->GetCompilePic(),
-                                                  IsAppImage(),
                                                   image_storage_mode_,
                                                   oat_filenames_,
                                                   dex_file_oat_index_map_,
+                                                  class_loader,
                                                   dirty_image_objects_.get()));
 
       // We need to prepare method offsets in the image address space for direct method patching.
@@ -2092,14 +2012,6 @@
                                              oat_writer->GetOatDataOffset(),
                                              oat_writer->GetOatSize());
         }
-
-        if (IsBootImage()) {
-          // Have the image_file_location_oat_checksum_ for boot oat files
-          // depend on the contents of all the boot oat files. This way only
-          // the primary image checksum needs to be checked to determine
-          // whether any of the images are out of date.
-          image_file_location_oat_checksum_ ^= oat_writer->GetOatHeader().GetChecksum();
-        }
       }
 
       for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
@@ -2138,7 +2050,7 @@
           elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
         }
 
-        if (!oat_writer->WriteHeader(elf_writer->GetStream(), image_file_location_oat_checksum_)) {
+        if (!oat_writer->WriteHeader(elf_writer->GetStream())) {
           LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
           return false;
         }
@@ -2189,12 +2101,12 @@
     for (size_t i = 0; i < oat_unstripped_.size(); ++i) {
       // If we don't want to strip in place, copy from stripped location to unstripped location.
       // We need to strip after image creation because FixupElf needs to use .strtab.
-      if (strcmp(oat_unstripped_[i], oat_filenames_[i]) != 0) {
+      if (oat_unstripped_[i] != oat_filenames_[i]) {
         DCHECK(oat_files_[i].get() != nullptr && oat_files_[i]->IsOpened());
 
         TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
         std::unique_ptr<File>& in = oat_files_[i];
-        std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i]));
+        std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i].c_str()));
         int64_t in_length = in->GetLength();
         if (in_length < 0) {
           PLOG(ERROR) << "Failed to get the length of oat file: " << in->GetPath();
@@ -2423,11 +2335,13 @@
     DCHECK_EQ(dex_filenames_.size(), dex_locations_.size());
     size_t kept = 0u;
     for (size_t i = 0, size = dex_filenames_.size(); i != size; ++i) {
-      if (!OS::FileExists(dex_filenames_[i])) {
+      if (!OS::FileExists(dex_filenames_[i].c_str())) {
         LOG(WARNING) << "Skipping non-existent dex file '" << dex_filenames_[i] << "'";
       } else {
-        dex_filenames_[kept] = dex_filenames_[i];
-        dex_locations_[kept] = dex_locations_[i];
+        if (kept != i) {
+          dex_filenames_[kept] = dex_filenames_[i];
+          dex_locations_[kept] = dex_locations_[i];
+        }
         ++kept;
       }
     }
@@ -2455,7 +2369,8 @@
       DCHECK_EQ(oat_writers_.size(), dex_filenames_.size());
       DCHECK_EQ(oat_writers_.size(), dex_locations_.size());
       for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
-        if (!oat_writers_[i]->AddDexFileSource(dex_filenames_[i], dex_locations_[i])) {
+        if (!oat_writers_[i]->AddDexFileSource(dex_filenames_[i].c_str(),
+                                               dex_locations_[i].c_str())) {
           return false;
         }
       }
@@ -2464,7 +2379,8 @@
       DCHECK_EQ(dex_filenames_.size(), dex_locations_.size());
       DCHECK_NE(dex_filenames_.size(), 0u);
       for (size_t i = 0; i != dex_filenames_.size(); ++i) {
-        if (!oat_writers_[0]->AddDexFileSource(dex_filenames_[i], dex_locations_[i])) {
+        if (!oat_writers_[0]->AddDexFileSource(dex_filenames_[i].c_str(),
+                                               dex_locations_[i].c_str())) {
           return false;
         }
       }
@@ -2624,7 +2540,7 @@
     CHECK(image_writer_ != nullptr);
     if (!IsBootImage()) {
       CHECK(image_filenames_.empty());
-      image_filenames_.push_back(app_image_file_name_.c_str());
+      image_filenames_.push_back(app_image_file_name_);
     }
     if (!image_writer_->Write(app_image_fd_,
                               image_filenames_,
@@ -2638,32 +2554,9 @@
     for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
       oat_data_begins.push_back(image_writer_->GetOatDataBegin(i));
     }
-    // Destroy ImageWriter before doing FixupElf.
+    // Destroy ImageWriter.
     image_writer_.reset();
 
-    for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
-      const char* oat_filename = oat_filenames_[i];
-      // Do not fix up the ELF file if we are --compile-pic or compiling the app image
-      if (!compiler_options_->GetCompilePic() && IsBootImage()) {
-        std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
-        if (oat_file.get() == nullptr) {
-          PLOG(ERROR) << "Failed to open ELF file: " << oat_filename;
-          return false;
-        }
-
-        if (!linker::ElfWriter::Fixup(oat_file.get(), oat_data_begins[i])) {
-          oat_file->Erase();
-          LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
-          return false;
-        }
-
-        if (oat_file->FlushCloseOrErase()) {
-          PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
-          return false;
-        }
-      }
-    }
-
     return true;
   }
 
@@ -2788,7 +2681,6 @@
   std::unique_ptr<CompilerOptions> compiler_options_;
   Compiler::Kind compiler_kind_;
 
-  uint32_t image_file_location_oat_checksum_;
   std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
 
   std::unique_ptr<VerificationResults> verification_results_;
@@ -2810,8 +2702,8 @@
   std::vector<std::unique_ptr<File>> oat_files_;
   std::vector<std::unique_ptr<File>> vdex_files_;
   std::string oat_location_;
-  std::vector<const char*> oat_filenames_;
-  std::vector<const char*> oat_unstripped_;
+  std::vector<std::string> oat_filenames_;
+  std::vector<std::string> oat_unstripped_;
   bool strip_;
   int oat_fd_;
   int input_vdex_fd_;
@@ -2822,13 +2714,13 @@
   int dm_fd_;
   std::string dm_file_location_;
   std::unique_ptr<ZipArchive> dm_file_;
-  std::vector<const char*> dex_filenames_;
-  std::vector<const char*> dex_locations_;
+  std::vector<std::string> dex_filenames_;
+  std::vector<std::string> dex_locations_;
   int zip_fd_;
   std::string zip_location_;
   std::string boot_image_filename_;
   std::vector<const char*> runtime_args_;
-  std::vector<const char*> image_filenames_;
+  std::vector<std::string> image_filenames_;
   uintptr_t image_base_;
   const char* image_classes_zip_filename_;
   const char* image_classes_filename_;
@@ -2837,7 +2729,6 @@
   const char* dirty_image_objects_filename_;
   std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
   std::unique_ptr<std::vector<std::string>> passes_to_run_;
-  bool multi_image_;
   bool is_host_;
   std::string android_root_;
   std::string no_inline_from_string_;
@@ -2854,6 +2745,7 @@
   std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
 
   bool avoid_storing_invocation_;
+  android::base::unique_fd invocation_file_;
   std::string swap_file_name_;
   int swap_fd_;
   size_t min_dex_files_for_swap_ = kDefaultMinDexFilesForSwap;
@@ -2926,11 +2818,12 @@
 
 static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
   dex2oat.LoadClassProfileDescriptors();
+  jobject class_loader = dex2oat.Compile();
   // Keep the class loader that was used for compilation live for the rest of the compilation
   // process.
-  ScopedGlobalRef class_loader(dex2oat.Compile());
+  ScopedGlobalRef global_ref(class_loader);
 
-  if (!dex2oat.WriteOutputFiles()) {
+  if (!dex2oat.WriteOutputFiles(class_loader)) {
     dex2oat.EraseOutputFiles();
     return dex2oat::ReturnCode::kOther;
   }
@@ -2970,11 +2863,12 @@
 }
 
 static dex2oat::ReturnCode CompileApp(Dex2Oat& dex2oat) {
+  jobject class_loader = dex2oat.Compile();
   // Keep the class loader that was used for compilation live for the rest of the compilation
   // process.
-  ScopedGlobalRef class_loader(dex2oat.Compile());
+  ScopedGlobalRef global_ref(class_loader);
 
-  if (!dex2oat.WriteOutputFiles()) {
+  if (!dex2oat.WriteOutputFiles(class_loader)) {
     dex2oat.EraseOutputFiles();
     return dex2oat::ReturnCode::kOther;
   }
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index 710f14c..783b326 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -187,7 +187,7 @@
 }
 
 static Parser CreateArgumentParser() {
-  std::unique_ptr<Builder> parser_builder = std::unique_ptr<Builder>(new Builder());
+  std::unique_ptr<Builder> parser_builder = std::make_unique<Builder>();
 
   AddInputMappings(*parser_builder);
   AddGeneratedArtifactMappings(*parser_builder);
@@ -229,6 +229,9 @@
                          {"false", linker::CopyOption::kNever},
                          {"always", linker::CopyOption::kAlways}})
           .IntoKey(M::CopyDexFiles)
+      .Define("--write-invocation-to=_")
+          .WithType<std::string>()
+          .IntoKey(M::InvocationFile)
       .Define("--classpath-dir=_")
           .WithType<std::string>()
           .IntoKey(M::ClasspathDir)
@@ -267,7 +270,7 @@
     return nullptr;
   }
 
-  return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap()));
+  return std::make_unique<Dex2oatArgumentMap>(parser.ReleaseArgumentsMap());
 }
 
 #pragma GCC diagnostic pop
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index c8cb7e7..0b77859 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -84,6 +84,7 @@
 DEX2OAT_OPTIONS_KEY (std::string,                    NoInlineFrom)
 DEX2OAT_OPTIONS_KEY (Unit,                           ForceDeterminism)
 DEX2OAT_OPTIONS_KEY (std::string,                    ClasspathDir)
+DEX2OAT_OPTIONS_KEY (std::string,                    InvocationFile)
 DEX2OAT_OPTIONS_KEY (std::string,                    ClassLoaderContext)
 DEX2OAT_OPTIONS_KEY (std::string,                    StoredClassLoaderContext)
 DEX2OAT_OPTIONS_KEY (std::string,                    DirtyImageObjects)
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index a1fed5f..d3bfb57 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <algorithm>
 #include <regex>
 #include <sstream>
 #include <string>
@@ -23,10 +24,12 @@
 #include <unistd.h>
 
 #include <android-base/logging.h>
+#include <android-base/macros.h>
 #include <android-base/stringprintf.h>
 
 #include "common_runtime_test.h"
 
+#include "arch/instruction_set_features.h"
 #include "base/macros.h"
 #include "base/mutex-inl.h"
 #include "base/utils.h"
@@ -39,6 +42,8 @@
 #include "dex/dex_file_loader.h"
 #include "dex2oat_environment_test.h"
 #include "dex2oat_return_codes.h"
+#include "gc_root-inl.h"
+#include "intern_table-inl.h"
 #include "oat.h"
 #include "oat_file.h"
 #include "profile/profile_compilation_info.h"
@@ -88,6 +93,10 @@
     args.push_back("--runtime-arg");
     args.push_back("-Xnorelocate");
 
+    // Unless otherwise stated, use a small amount of threads, so that potential aborts are
+    // shorter. This can be overridden with extra_args.
+    args.push_back("-j4");
+
     args.insert(args.end(), extra_args.begin(), extra_args.end());
 
     int status = Dex2Oat(args, error_msg);
@@ -97,33 +106,33 @@
     return status;
   }
 
-  void GenerateOdexForTest(
+  ::testing::AssertionResult GenerateOdexForTest(
       const std::string& dex_location,
       const std::string& odex_location,
       CompilerFilter::Filter filter,
       const std::vector<std::string>& extra_args = {},
       bool expect_success = true,
-      bool use_fd = false) {
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        filter,
-                        extra_args,
-                        expect_success,
-                        use_fd,
-                        [](const OatFile&) {});
+      bool use_fd = false) WARN_UNUSED {
+    return GenerateOdexForTest(dex_location,
+                               odex_location,
+                               filter,
+                               extra_args,
+                               expect_success,
+                               use_fd,
+                               [](const OatFile&) {});
   }
 
   bool test_accepts_odex_file_on_failure = false;
 
   template <typename T>
-  void GenerateOdexForTest(
+  ::testing::AssertionResult GenerateOdexForTest(
       const std::string& dex_location,
       const std::string& odex_location,
       CompilerFilter::Filter filter,
       const std::vector<std::string>& extra_args,
       bool expect_success,
       bool use_fd,
-      T check_oat) {
+      T check_oat) WARN_UNUSED {
     std::string error_msg;
     int status = GenerateOdexForTestWithStatus({dex_location},
                                                odex_location,
@@ -133,41 +142,49 @@
                                                use_fd);
     bool success = (WIFEXITED(status) && WEXITSTATUS(status) == 0);
     if (expect_success) {
-      ASSERT_TRUE(success) << error_msg << std::endl << output_;
+      if (!success) {
+        return ::testing::AssertionFailure()
+            << "Failed to compile odex: " << error_msg << std::endl << output_;
+      }
 
       // Verify the odex file was generated as expected.
-      std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+      std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                        odex_location.c_str(),
                                                        odex_location.c_str(),
-                                                       /* requested_base */ nullptr,
-                                                       /* executable */ false,
-                                                       /* low_4gb */ false,
+                                                       /*executable=*/ false,
+                                                       /*low_4gb=*/ false,
                                                        dex_location.c_str(),
-                                                       /* reservation */ nullptr,
+                                                       /*reservation=*/ nullptr,
                                                        &error_msg));
-      ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+      if (odex_file == nullptr) {
+        return ::testing::AssertionFailure() << "Could not open odex file: " << error_msg;
+      }
 
       CheckFilter(filter, odex_file->GetCompilerFilter());
       check_oat(*(odex_file.get()));
     } else {
-      ASSERT_FALSE(success) << output_;
+      if (success) {
+        return ::testing::AssertionFailure() << "Succeeded to compile odex: " << output_;
+      }
 
       error_msg_ = error_msg;
 
       if (!test_accepts_odex_file_on_failure) {
         // Verify there's no loadable odex file.
-        std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+        std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                          odex_location.c_str(),
                                                          odex_location.c_str(),
-                                                         /* requested_base */ nullptr,
-                                                         /* executable */ false,
-                                                         /* low_4gb */ false,
+                                                         /*executable=*/ false,
+                                                         /*low_4gb=*/ false,
                                                          dex_location.c_str(),
-                                                         /* reservation */ nullptr,
+                                                         /*reservation=*/ nullptr,
                                                          &error_msg));
-        ASSERT_TRUE(odex_file.get() == nullptr);
+        if (odex_file != nullptr) {
+          return ::testing::AssertionFailure() << "Could open odex file: " << error_msg;
+        }
       }
     }
+    return ::testing::AssertionSuccess();
   }
 
   // Check the input compiler filter against the generated oat file's filter. May be overridden
@@ -177,24 +194,12 @@
   }
 
   int Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) {
-    Runtime* runtime = Runtime::Current();
-
-    const std::vector<gc::space::ImageSpace*>& image_spaces =
-        runtime->GetHeap()->GetBootImageSpaces();
-    if (image_spaces.empty()) {
-      *error_msg = "No image location found for Dex2Oat.";
+    std::vector<std::string> argv;
+    if (!CommonRuntimeTest::StartDex2OatCommandLine(&argv, error_msg)) {
       return false;
     }
-    std::string image_location = image_spaces[0]->GetImageLocation();
 
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetCompilerExecutable());
-
-    if (runtime->IsJavaDebuggable()) {
-      argv.push_back("--debuggable");
-    }
-    runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
+    Runtime* runtime = Runtime::Current();
     if (!runtime->IsVerificationEnabled()) {
       argv.push_back("--compiler-filter=assume-verified");
     }
@@ -211,11 +216,6 @@
       argv.push_back("--host");
     }
 
-    argv.push_back("--boot-image=" + image_location);
-
-    std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-    argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
     argv.insert(argv.end(), dex2oat_args.begin(), dex2oat_args.end());
 
     // We must set --android-root.
@@ -265,7 +265,7 @@
       std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
       copy.push_back("--swap-file=" + swap_location);
     }
-    GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, copy);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, copy));
 
     CheckValidity();
     ASSERT_TRUE(success_);
@@ -324,26 +324,26 @@
 };
 
 TEST_F(Dex2oatSwapTest, DoNotUseSwapDefaultSingleSmall) {
-  RunTest(false /* use_fd */, false /* expect_use */);
-  RunTest(true /* use_fd */, false /* expect_use */);
+  RunTest(/*use_fd=*/ false, /*expect_use=*/ false);
+  RunTest(/*use_fd=*/ true, /*expect_use=*/ false);
 }
 
 TEST_F(Dex2oatSwapTest, DoNotUseSwapSingle) {
-  RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
-  RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
+  RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
+  RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
 }
 
 TEST_F(Dex2oatSwapTest, DoNotUseSwapSmall) {
-  RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
-  RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
+  RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
+  RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
 }
 
 TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
-  RunTest(false /* use_fd */,
-          true /* expect_use */,
+  RunTest(/*use_fd=*/ false,
+          /*expect_use=*/ true,
           { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
-  RunTest(true /* use_fd */,
-          true /* expect_use */,
+  RunTest(/*use_fd=*/ true,
+          /*expect_use=*/ true,
           { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
 }
 
@@ -369,7 +369,7 @@
   void GrabResult1() {
     if (!kIsTargetBuild) {
       native_alloc_1_ = ParseNativeAlloc();
-      swap_1_ = ParseSwap(false /* expected */);
+      swap_1_ = ParseSwap(/*expected=*/ false);
     } else {
       native_alloc_1_ = std::numeric_limits<size_t>::max();
       swap_1_ = 0;
@@ -379,7 +379,7 @@
   void GrabResult2() {
     if (!kIsTargetBuild) {
       native_alloc_2_ = ParseNativeAlloc();
-      swap_2_ = ParseSwap(true /* expected */);
+      swap_2_ = ParseSwap(/*expected=*/ true);
     } else {
       native_alloc_2_ = 0;
       swap_2_ = std::numeric_limits<size_t>::max();
@@ -449,15 +449,15 @@
   // investigate (b/29259363).
   TEST_DISABLED_FOR_X86();
 
-  RunTest(false /* use_fd */,
-          false /* expect_use */);
+  RunTest(/*use_fd=*/ false,
+          /*expect_use=*/ false);
   GrabResult1();
   std::string output_1 = output_;
 
   output_ = "";
 
-  RunTest(false /* use_fd */,
-          true /* expect_use */,
+  RunTest(/*use_fd=*/ false,
+          /*expect_use=*/ true,
           { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
   GrabResult2();
   std::string output_2 = output_;
@@ -490,7 +490,7 @@
 
     std::vector<std::string> new_args(extra_args);
     new_args.push_back("--app-image-file=" + app_image_file);
-    GenerateOdexForTest(dex_location, odex_location, filter, new_args);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location, odex_location, filter, new_args));
 
     CheckValidity();
     ASSERT_TRUE(success_);
@@ -513,14 +513,13 @@
     }
     // Host/target independent checks.
     std::string error_msg;
-    std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     /* requested_base */ nullptr,
-                                                     /* executable */ false,
-                                                     /* low_4gb */ false,
+                                                     /*executable=*/ false,
+                                                     /*low_4gb=*/ false,
                                                      dex_location.c_str(),
-                                                     /* reservation */ nullptr,
+                                                     /*reservation=*/ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     EXPECT_GT(app_image_file.length(), 0u);
@@ -636,7 +635,9 @@
                        const std::string& dex_location,
                        size_t num_classes,
                        uint32_t checksum) {
-    int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+    int profile_test_fd = open(test_profile.c_str(),
+                               O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
+                               0644);
     CHECK_GE(profile_test_fd, 0);
 
     ProfileCompilationInfo info;
@@ -662,7 +663,7 @@
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     const ArtDexFileLoader dex_file_loader;
     ASSERT_TRUE(dex_file_loader.Open(
-        location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+        location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
     EXPECT_EQ(dex_files.size(), 1U);
     std::unique_ptr<const DexFile>& dex_file = dex_files[0];
     GenerateProfile(profile_location,
@@ -680,12 +681,12 @@
         copy.push_back("--app-image-file=" + app_image_file_name);
       }
     }
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        CompilerFilter::kSpeedProfile,
-                        copy,
-                        expect_success,
-                        use_fd);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    CompilerFilter::kSpeedProfile,
+                                    copy,
+                                    expect_success,
+                                    use_fd));
     if (app_image_file != nullptr) {
       ASSERT_EQ(app_image_file->FlushCloseOrErase(), 0) << "Could not flush and close art file";
     }
@@ -714,8 +715,8 @@
       CompileProfileOdex(dex_location,
                          odex_location,
                          app_image_file,
-                         /* use_fd */ false,
-                         /* num_profile_classes */ 0);
+                         /*use_fd=*/ false,
+                         /*num_profile_classes=*/ 0);
       CheckValidity();
       ASSERT_TRUE(success_);
       // Don't check the result since CheckResult relies on the class being in the profile.
@@ -727,8 +728,8 @@
     CompileProfileOdex(dex_location,
                        odex_location,
                        app_image_file,
-                       /* use_fd */ false,
-                       /* num_profile_classes */ 1);
+                       /*use_fd=*/ false,
+                       /*num_profile_classes=*/ 1);
     CheckValidity();
     ASSERT_TRUE(success_);
     CheckResult(dex_location, odex_location, app_image_file);
@@ -756,8 +757,8 @@
       CompileProfileOdex(dex_location,
                          odex_location,
                          app_image_file_name,
-                         /* use_fd */ true,
-                         /* num_profile_classes */ 1,
+                         /*use_fd=*/ true,
+                         /*num_profile_classes=*/ 1,
                          { input_vdex, output_vdex });
       EXPECT_GT(vdex_file1->GetLength(), 0u);
     }
@@ -768,10 +769,10 @@
       CompileProfileOdex(dex_location,
                          odex_location,
                          app_image_file_name,
-                         /* use_fd */ true,
-                         /* num_profile_classes */ 1,
+                         /*use_fd=*/ true,
+                         /*num_profile_classes=*/ 1,
                          { input_vdex, output_vdex },
-                         /* expect_success */ true);
+                         /*expect_success=*/ true);
       EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
     }
     ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -784,14 +785,13 @@
                    const std::string& app_image_file_name) {
     // Host/target independent checks.
     std::string error_msg;
-    std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     /* requested_base */ nullptr,
-                                                     /* executable */ false,
-                                                     /* low_4gb */ false,
+                                                     /*executable=*/ false,
+                                                     /*low_4gb=*/ false,
                                                      dex_location.c_str(),
-                                                     /* reservation */ nullptr,
+                                                     /*reservation=*/ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
 
@@ -799,7 +799,7 @@
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     const ArtDexFileLoader dex_file_loader;
     ASSERT_TRUE(dex_file_loader.Open(
-        location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+        location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
     EXPECT_EQ(dex_files.size(), 1U);
     std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
 
@@ -852,11 +852,11 @@
 };
 
 TEST_F(Dex2oatLayoutTest, TestLayout) {
-  RunTest(/* app-image */ false);
+  RunTest(/*app_image=*/ false);
 }
 
 TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
-  RunTest(/* app-image */ true);
+  RunTest(/*app_image=*/ true);
 }
 
 TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
@@ -877,24 +877,24 @@
     {
       std::string input_vdex = "--input-vdex-fd=-1";
       std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
-      GenerateOdexForTest(dex_location,
-                          odex_location,
-                          CompilerFilter::kQuicken,
-                          { input_vdex, output_vdex },
-                          /* expect_success */ true,
-                          /* use_fd */ true);
+      ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                      odex_location,
+                                      CompilerFilter::kQuicken,
+                                      { input_vdex, output_vdex },
+                                      /* expect_success= */ true,
+                                      /* use_fd= */ true));
       EXPECT_GT(vdex_file1->GetLength(), 0u);
     }
     // Unquicken by running the verify compiler filter on the vdex file.
     {
       std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
       std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
-      GenerateOdexForTest(dex_location,
-                          odex_location,
-                          CompilerFilter::kVerify,
-                          { input_vdex, output_vdex, kDisableCompactDex },
-                          /* expect_success */ true,
-                          /* use_fd */ true);
+      ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                      odex_location,
+                                      CompilerFilter::kVerify,
+                                      { input_vdex, output_vdex, kDisableCompactDex },
+                                      /* expect_success= */ true,
+                                      /* use_fd= */ true));
     }
     ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
     CheckResult(dex_location, odex_location);
@@ -918,12 +918,12 @@
     {
       std::string input_vdex = "--input-vdex-fd=-1";
       std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
-      GenerateOdexForTest(dex_location,
-                          odex_location,
-                          CompilerFilter::kQuicken,
-                          { input_vdex, output_vdex, "--compact-dex-level=fast"},
-                          /* expect_success */ true,
-                          /* use_fd */ true);
+      ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                      odex_location,
+                                      CompilerFilter::kQuicken,
+                                      { input_vdex, output_vdex, "--compact-dex-level=fast"},
+                                      /* expect_success= */ true,
+                                      /* use_fd= */ true));
       EXPECT_GT(vdex_file1->GetLength(), 0u);
     }
 
@@ -931,12 +931,12 @@
     {
       std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
       std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2->Fd());
-      GenerateOdexForTest(dex_location,
-                          odex_location2,
-                          CompilerFilter::kVerify,
-                          { input_vdex, output_vdex, "--compact-dex-level=none"},
-                          /* expect_success */ true,
-                          /* use_fd */ true);
+      ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                      odex_location2,
+                                      CompilerFilter::kVerify,
+                                      { input_vdex, output_vdex, "--compact-dex-level=none"},
+                                      /* expect_success= */ true,
+                                      /* use_fd= */ true));
     }
     ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
     ASSERT_EQ(vdex_file2->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -946,14 +946,13 @@
 
   void CheckResult(const std::string& dex_location, const std::string& odex_location) {
     std::string error_msg;
-    std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     /* requested_base */ nullptr,
-                                                     /* executable */ false,
-                                                     /* low_4gb */ false,
+                                                     /*executable=*/ false,
+                                                     /*low_4gb=*/ false,
                                                      dex_location.c_str(),
-                                                     /* reservation */ nullptr,
+                                                     /*reservation=*/ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -993,11 +992,11 @@
     std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
     copy.push_back("--swap-file=" + swap_location);
     copy.push_back("-j512");  // Excessive idle threads just slow down dex2oat.
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        CompilerFilter::kSpeed,
-                        copy,
-                        expect_success);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    CompilerFilter::kSpeed,
+                                    copy,
+                                    expect_success));
   }
 
   std::string GetTestDexFileName() {
@@ -1014,6 +1013,10 @@
 }
 
 TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
+  // This test is frequently interrupted by timeout_dumper on host (x86);
+  // disable it while we investigate (b/121352534).
+  TEST_DISABLED_FOR_X86();
+
   // The watchdog is independent of dex2oat and will not delete intermediates. It is possible
   // that the compilation succeeds and the file is completely written by the time the watchdog
   // kills dex2oat (but the dex2oat threads must have been scheduled pretty badly).
@@ -1055,7 +1058,8 @@
   void RunTest(const char* class_loader_context,
                const char* expected_classpath_key,
                bool expected_success,
-               bool use_second_source = false) {
+               bool use_second_source = false,
+               bool generate_image = false) {
     std::string dex_location = GetUsedDexLocation();
     std::string odex_location = GetUsedOatLocation();
 
@@ -1066,6 +1070,9 @@
     if (class_loader_context != nullptr) {
       extra_args.push_back(std::string("--class-loader-context=") + class_loader_context);
     }
+    if (generate_image) {
+      extra_args.push_back(std::string("--app-image-file=") + GetUsedImageLocation());
+    }
     auto check_oat = [expected_classpath_key](const OatFile& oat_file) {
       ASSERT_TRUE(expected_classpath_key != nullptr);
       const char* classpath = oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey);
@@ -1073,13 +1080,13 @@
       ASSERT_STREQ(expected_classpath_key, classpath);
     };
 
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        CompilerFilter::kQuicken,
-                        extra_args,
-                        expected_success,
-                        /*use_fd*/ false,
-                        check_oat);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    CompilerFilter::kQuicken,
+                                    extra_args,
+                                    expected_success,
+                                    /*use_fd*/ false,
+                                    check_oat));
   }
 
   std::string GetUsedDexLocation() {
@@ -1090,6 +1097,10 @@
     return GetOdexDir() + "/Context.odex";
   }
 
+  std::string GetUsedImageLocation() {
+    return GetOdexDir() + "/Context.art";
+  }
+
   const char* kEmptyClassPathKey = "PCL[]";
 };
 
@@ -1136,11 +1147,11 @@
 
   Copy(GetDexSrc1(), stripped_classpath);
 
-  GenerateOdexForTest(stripped_classpath,
-                      odex_for_classpath,
-                      CompilerFilter::kQuicken,
-                      {},
-                      true);
+  ASSERT_TRUE(GenerateOdexForTest(stripped_classpath,
+                                  odex_for_classpath,
+                                  CompilerFilter::kQuicken,
+                                  {},
+                                  true));
 
   // Strip the dex file
   Copy(GetStrippedDexSrc1(), stripped_classpath);
@@ -1188,6 +1199,66 @@
   RunTest(context.c_str(), expected_classpath_key.c_str(), true);
 }
 
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibrary) {
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+  std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+      "{PCL[" + GetTestDexFileName("MultiDex") + "]}";
+  std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+      "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]}";
+  RunTest(context.c_str(), expected_classpath_key.c_str(), true);
+}
+
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibraryAndImage) {
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+  std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+      "{PCL[" + GetTestDexFileName("MultiDex") + "]}";
+  std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+      "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]}";
+  RunTest(context.c_str(),
+          expected_classpath_key.c_str(),
+          /*expected_success=*/ true,
+          /*use_second_source=*/ false,
+          /*generate_image=*/ true);
+}
+
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSameSharedLibrariesAndImage) {
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+  std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+      "{PCL[" + GetTestDexFileName("MultiDex") + "]" +
+      "#PCL[" + GetTestDexFileName("MultiDex") + "]}";
+  std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+      "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]" +
+      "#PCL[" + CreateClassPathWithChecksums(dex_files2) + "]}";
+  RunTest(context.c_str(),
+          expected_classpath_key.c_str(),
+          /*expected_success=*/ true,
+          /*use_second_source=*/ false,
+          /*generate_image=*/ true);
+}
+
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibrariesDependenciesAndImage) {
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+  std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+      "{PCL[" + GetTestDexFileName("MultiDex") + "]" +
+      "{PCL[" + GetTestDexFileName("Nested") + "]}}";
+  std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+      "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]" +
+      "{PCL[" + CreateClassPathWithChecksums(dex_files1) + "]}}";
+  RunTest(context.c_str(),
+          expected_classpath_key.c_str(),
+          /*expected_success=*/ true,
+          /*use_second_source=*/ false,
+          /*generate_image=*/ true);
+}
+
 class Dex2oatDeterminism : public Dex2oatTest {};
 
 TEST_F(Dex2oatDeterminism, UnloadCompile) {
@@ -1217,7 +1288,7 @@
       CompilerFilter::Filter::kQuicken,
       &error_msg,
       {"--force-determinism", "--avoid-storing-invocation"});
-  EXPECT_EQ(res, 0);
+  ASSERT_EQ(res, 0);
   Copy(base_oat_name, unload_oat_name);
   Copy(base_vdex_name, unload_vdex_name);
   std::unique_ptr<File> unload_oat(OS::OpenFileForReading(unload_oat_name.c_str()));
@@ -1234,7 +1305,7 @@
       CompilerFilter::Filter::kQuicken,
       &error_msg,
       {"--force-determinism", "--avoid-storing-invocation", "--app-image-file=" + app_image_name});
-  EXPECT_EQ(res2, 0);
+  ASSERT_EQ(res2, 0);
   Copy(base_oat_name, no_unload_oat_name);
   Copy(base_vdex_name, no_unload_vdex_name);
   std::unique_ptr<File> no_unload_oat(OS::OpenFileForReading(no_unload_oat_name.c_str()));
@@ -1266,7 +1337,7 @@
   // first.
   std::vector<uint16_t> methods;
   {
-    const DexFile::TypeId* type_id = dex->FindTypeId("LManyMethods;");
+    const dex::TypeId* type_id = dex->FindTypeId("LManyMethods;");
     dex::TypeIndex type_idx = dex->GetIndexForTypeId(*type_id);
     ClassAccessor accessor(*dex, *dex->FindClassDef(type_idx));
     std::set<size_t> code_item_offsets;
@@ -1326,14 +1397,13 @@
   EXPECT_EQ(res, 0);
 
   // Open our generated oat file.
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex->GetLocation().c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1367,10 +1437,10 @@
     // we expect.
     std::unique_ptr<const DexFile> dex_file(oat_dex->OpenDexFile(&error_msg));
     ASSERT_TRUE(dex_file != nullptr) << error_msg;
-    const DexFile::TypeId* type_id = dex_file->FindTypeId("LManyMethods;");
+    const dex::TypeId* type_id = dex_file->FindTypeId("LManyMethods;");
     ASSERT_TRUE(type_id != nullptr);
     dex::TypeIndex type_idx = dex_file->GetIndexForTypeId(*type_id);
-    const DexFile::ClassDef* class_def = dex_file->FindClassDef(type_idx);
+    const dex::ClassDef* class_def = dex_file->FindClassDef(type_idx);
     ASSERT_TRUE(class_def != nullptr);
 
     // Count how many code items are for each category, there should be at least one per category.
@@ -1436,14 +1506,13 @@
       {"--compact-dex-level=fast"});
   EXPECT_EQ(res, 0);
   // Open our generated oat file.
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1538,26 +1607,26 @@
   std::string out_dir = GetScratchDir();
   const std::string base_oat_name = out_dir + "/base.oat";
   size_t no_dedupe_size = 0;
-  GenerateOdexForTest(dex->GetLocation(),
-                      base_oat_name,
-                      CompilerFilter::Filter::kSpeed,
-                      { "--deduplicate-code=false" },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [&no_dedupe_size](const OatFile& o) {
-                        no_dedupe_size = o.Size();
-                      });
+  ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+                                  base_oat_name,
+                                  CompilerFilter::Filter::kSpeed,
+                                  { "--deduplicate-code=false" },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [&no_dedupe_size](const OatFile& o) {
+                                    no_dedupe_size = o.Size();
+                                  }));
 
   size_t dedupe_size = 0;
-  GenerateOdexForTest(dex->GetLocation(),
-                      base_oat_name,
-                      CompilerFilter::Filter::kSpeed,
-                      { "--deduplicate-code=true" },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [&dedupe_size](const OatFile& o) {
-                        dedupe_size = o.Size();
-                      });
+  ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+                                  base_oat_name,
+                                  CompilerFilter::Filter::kSpeed,
+                                  { "--deduplicate-code=true" },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [&dedupe_size](const OatFile& o) {
+                                    dedupe_size = o.Size();
+                                  }));
 
   EXPECT_LT(dedupe_size, no_dedupe_size);
 }
@@ -1566,15 +1635,15 @@
   std::unique_ptr<const DexFile> dex(OpenTestDexFile("MainUncompressed"));
   std::string out_dir = GetScratchDir();
   const std::string base_oat_name = out_dir + "/base.oat";
-  GenerateOdexForTest(dex->GetLocation(),
-                      base_oat_name,
-                      CompilerFilter::Filter::kQuicken,
-                      { },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [](const OatFile& o) {
-                        CHECK(!o.ContainsDexCode());
-                      });
+  ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+                                  base_oat_name,
+                                  CompilerFilter::Filter::kQuicken,
+                                  { },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [](const OatFile& o) {
+                                    CHECK(!o.ContainsDexCode());
+                                  }));
 }
 
 TEST_F(Dex2oatTest, EmptyUncompressedDexTest) {
@@ -1670,25 +1739,24 @@
   std::string out_dir = GetScratchDir();
   const std::string oat_filename = out_dir + "/base.oat";
   // The dex won't pass the method verifier, only use the verify filter.
-  GenerateOdexForTest(temp_dex.GetFilename(),
-                      oat_filename,
-                      CompilerFilter::Filter::kVerify,
-                      { },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [](const OatFile& o) {
-                        CHECK(o.ContainsDexCode());
-                      });
+  ASSERT_TRUE(GenerateOdexForTest(temp_dex.GetFilename(),
+                                  oat_filename,
+                                  CompilerFilter::Filter::kVerify,
+                                  { },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [](const OatFile& o) {
+                                    CHECK(o.ContainsDexCode());
+                                  }));
   // Open our generated oat file.
   std::string error_msg;
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    temp_dex.GetFilename().c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1705,7 +1773,7 @@
   // Create a multidex file with only one dex that gets rejected for cdex conversion.
   ScratchFile apk_file;
   {
-    FILE* file = fdopen(dup(apk_file.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(apk_file.GetFd()), "w+b");
     ZipWriter writer(file);
     // Add vdex to zip.
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
@@ -1722,11 +1790,11 @@
   }
   const std::string& dex_location = apk_file.GetFilename();
   const std::string odex_location = GetOdexDir() + "/output.odex";
-  GenerateOdexForTest(dex_location,
-                      odex_location,
-                      CompilerFilter::kQuicken,
-                      { "--compact-dex-level=fast" },
-                      true);
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::kQuicken,
+                                  { "--compact-dex-level=fast" },
+                                  true));
 }
 
 TEST_F(Dex2oatTest, StderrLoggerOutput) {
@@ -1736,11 +1804,11 @@
   // Test file doesn't matter.
   Copy(GetDexSrc1(), dex_location);
 
-  GenerateOdexForTest(dex_location,
-                      odex_location,
-                      CompilerFilter::kQuicken,
-                      { "--runtime-arg", "-Xuse-stderr-logger" },
-                      true);
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::kQuicken,
+                                  { "--runtime-arg", "-Xuse-stderr-logger" },
+                                  true));
   // Look for some random part of dex2oat logging. With the stderr logger this should be captured,
   // even on device.
   EXPECT_NE(std::string::npos, output_.find("dex2oat took"));
@@ -1753,20 +1821,19 @@
   // Test file doesn't matter.
   Copy(GetDexSrc1(), dex_location);
 
-  GenerateOdexForTest(dex_location,
-                      odex_location,
-                      CompilerFilter::kVerify,
-                      { "--compilation-reason=install" },
-                      true);
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::kVerify,
+                                  { "--compilation-reason=install" },
+                                  true));
   std::string error_msg;
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1779,20 +1846,19 @@
   // Test file doesn't matter.
   Copy(GetDexSrc1(), dex_location);
 
-  GenerateOdexForTest(dex_location,
-                      odex_location,
-                      CompilerFilter::kVerify,
-                      {},
-                      true);
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::kVerify,
+                                  {},
+                                  true));
   std::string error_msg;
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1805,32 +1871,30 @@
   const std::string dex_location = dex->GetLocation();
   const std::string odex_location = out_dir + "/base.oat";
   const std::string vdex_location = out_dir + "/base.vdex";
-  GenerateOdexForTest(dex_location,
-                      odex_location,
-                      CompilerFilter::Filter::kVerify,
-                      { "--copy-dex-files=false" },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [](const OatFile&) {
-                      });
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::Filter::kVerify,
+                                  { "--copy-dex-files=false" },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [](const OatFile&) {}));
   {
     // Check the vdex doesn't have dex.
     std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_location.c_str(),
-                                                  /*writable*/ false,
-                                                  /*low_4gb*/ false,
-                                                  /*unquicken*/ false,
+                                                  /*writable=*/ false,
+                                                  /*low_4gb=*/ false,
+                                                  /*unquicken=*/ false,
                                                   &error_msg));
     ASSERT_TRUE(vdex != nullptr);
     EXPECT_FALSE(vdex->HasDexSection()) << output_;
   }
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr) << dex_location;
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1847,7 +1911,7 @@
     std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_location.c_str()));
     ASSERT_TRUE(vdex_file != nullptr);
     ASSERT_GT(vdex_file->GetLength(), 0u);
-    FILE* file = fdopen(dup(dm_file.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(dm_file.GetFd()), "w+b");
     ZipWriter writer(file);
     auto write_all_bytes = [&](File* file) {
       std::unique_ptr<uint8_t[]> bytes(new uint8_t[file->GetLength()]);
@@ -1863,19 +1927,21 @@
   }
 
   auto generate_and_check = [&](CompilerFilter::Filter filter) {
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        filter,
-                        { "--dump-timings",
-                          "--dm-file=" + dm_file.GetFilename(),
-                          // Pass -Xuse-stderr-logger have dex2oat output in output_ on target.
-                          "--runtime-arg",
-                          "-Xuse-stderr-logger" },
-                        true,  // expect_success
-                        false,  // use_fd
-                        [](const OatFile& o) {
-                          CHECK(o.ContainsDexCode());
-                        });
+    output_.clear();
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    filter,
+                                    { "--dump-timings",
+                                      "--dm-file=" + dm_file.GetFilename(),
+                                      // Pass -Xuse-stderr-logger to have dex2oat output in output_ on
+                                      // target.
+                                      "--runtime-arg",
+                                      "-Xuse-stderr-logger" },
+                                    true,  // expect_success
+                                    false,  // use_fd
+                                    [](const OatFile& o) {
+                                      CHECK(o.ContainsDexCode());
+                                    }));
     // Check the output for "Fast verify", this is printed from --dump-timings.
     std::istringstream iss(output_);
     std::string line;
@@ -1923,14 +1989,14 @@
   {
     std::string input_vdex = "--input-vdex-fd=-1";
     std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_output->Fd());
-    GenerateOdexForTest(dex_location,
-                        odex_location,
-                        CompilerFilter::kQuicken,
-                        // Disable cdex since we want to compare against the original dex file
-                        // after unquickening.
-                        { input_vdex, output_vdex, kDisableCompactDex },
-                        /* expect_success */ true,
-                        /* use_fd */ true);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    CompilerFilter::kQuicken,
+                                    // Disable cdex since we want to compare against the original
+                                    // dex file after unquickening.
+                                    { input_vdex, output_vdex, kDisableCompactDex },
+                                    /* expect_success= */ true,
+                                    /* use_fd= */ true));
   }
   // Unquicken by running the verify compiler filter on the vdex file and verify it matches.
   std::string odex_location2 = GetOdexDir() + "/unquickened.odex";
@@ -1939,13 +2005,14 @@
   {
     std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_output->Fd());
     std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_unquickened->Fd());
-    GenerateOdexForTest(dex_location,
-                        odex_location2,
-                        CompilerFilter::kVerify,
-                        // Disable cdex to avoid needing to write out the shared section.
-                        { input_vdex, output_vdex, kDisableCompactDex },
-                        /* expect_success */ true,
-                        /* use_fd */ true);
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location2,
+                                    CompilerFilter::kVerify,
+                                    // Disable cdex to avoid needing to write out the shared
+                                    // section.
+                                    { input_vdex, output_vdex, kDisableCompactDex },
+                                    /* expect_success= */ true,
+                                    /* use_fd= */ true));
   }
   ASSERT_EQ(vdex_unquickened->Flush(), 0) << "Could not flush and close vdex file";
   ASSERT_TRUE(success_);
@@ -1973,7 +2040,7 @@
 TEST_F(Dex2oatTest, CompactDexInvalidSource) {
   ScratchFile invalid_dex;
   {
-    FILE* file = fdopen(dup(invalid_dex.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(invalid_dex.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kAlign32);
     DexFile::Header header = {};
@@ -2015,7 +2082,7 @@
   // Create a zip containing the invalid dex.
   ScratchFile invalid_dex_zip;
   {
-    FILE* file = fdopen(dup(invalid_dex_zip.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(invalid_dex_zip.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
     ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
@@ -2053,23 +2120,22 @@
   ScratchFile app_image_file;
   const std::string out_dir = GetScratchDir();
   const std::string odex_location = out_dir + "/base.odex";
-  GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
-                      odex_location,
-                      CompilerFilter::Filter::kSpeedProfile,
-                      { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [](const OatFile&) {});
+  ASSERT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+                                  odex_location,
+                                  CompilerFilter::Filter::kSpeedProfile,
+                                  { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [](const OatFile&) {}));
   // Open our generated oat file.
   std::string error_msg;
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    odex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ImageHeader header = {};
@@ -2082,6 +2148,130 @@
   EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
 }
 
+TEST_F(Dex2oatTest, AppImageResolveStrings) {
+  using Hotness = ProfileCompilationInfo::MethodHotness;
+  // Create a profile with the startup method marked.
+  ScratchFile profile_file;
+  std::vector<uint16_t> methods;
+  std::vector<dex::TypeIndex> classes;
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("StringLiterals"));
+  {
+    for (ClassAccessor accessor : dex->GetClasses()) {
+      if (accessor.GetDescriptor() == std::string("LStringLiterals$StartupClass;")) {
+        classes.push_back(accessor.GetClassIdx());
+      }
+      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+        std::string method_name(dex->GetMethodName(dex->GetMethodId(method.GetIndex())));
+        if (method_name == "startUpMethod") {
+          methods.push_back(method.GetIndex());
+        }
+      }
+    }
+    ASSERT_GT(classes.size(), 0u);
+    ASSERT_GT(methods.size(), 0u);
+    // Here, we build the profile from the method lists.
+    ProfileCompilationInfo info;
+    info.AddClassesForDex(dex.get(), classes.begin(), classes.end());
+    info.AddMethodsForDex(Hotness::kFlagStartup, dex.get(), methods.begin(), methods.end());
+    // Save the profile since we want to use it with dex2oat to produce an oat file.
+    ASSERT_TRUE(info.Save(profile_file.GetFd()));
+  }
+  const std::string out_dir = GetScratchDir();
+  const std::string odex_location = out_dir + "/base.odex";
+  const std::string app_image_location = out_dir + "/base.art";
+  ASSERT_TRUE(GenerateOdexForTest(GetTestDexFileName("StringLiterals"),
+                                  odex_location,
+                                  CompilerFilter::Filter::kSpeedProfile,
+                                  { "--app-image-file=" + app_image_location,
+                                    "--resolve-startup-const-strings=true",
+                                    "--profile-file=" + profile_file.GetFilename()},
+                                  /* expect_success= */ true,
+                                  /* use_fd= */ false,
+                                  [](const OatFile&) {}));
+  // Open our generated oat file.
+  std::string error_msg;
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                   odex_location.c_str(),
+                                                   odex_location.c_str(),
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
+                                                   odex_location.c_str(),
+                                                   /*reservation=*/ nullptr,
+                                                   &error_msg));
+  ASSERT_TRUE(odex_file != nullptr);
+  // Check that the strings in the app image intern table only contain the "startup" strings.
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    std::unique_ptr<gc::space::ImageSpace> space =
+        gc::space::ImageSpace::CreateFromAppImage(app_image_location.c_str(),
+                                                  odex_file.get(),
+                                                  &error_msg);
+    ASSERT_TRUE(space != nullptr) << error_msg;
+    std::set<std::string> seen;
+    InternTable intern_table;
+    intern_table.AddImageStringsToTable(space.get(), [&](InternTable::UnorderedSet& interns)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      for (const GcRoot<mirror::String>& str : interns) {
+        seen.insert(str.Read()->ToModifiedUtf8());
+      }
+    });
+    // Ensure that the dex cache has a preresolved string array.
+    std::set<std::string> preresolved_seen;
+    bool saw_dexcache = false;
+    space->GetLiveBitmap()->VisitAllMarked(
+        [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (obj->IsDexCache<kVerifyNone>()) {
+        ObjPtr<mirror::DexCache> dex_cache = obj->AsDexCache();
+        GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+        ASSERT_EQ(dex->NumStringIds(), dex_cache->NumPreResolvedStrings());
+        for (size_t i = 0; i < dex_cache->NumPreResolvedStrings(); ++i) {
+          ObjPtr<mirror::String> string = preresolved_strings[i].Read<kWithoutReadBarrier>();
+          if (string != nullptr) {
+            preresolved_seen.insert(string->ToModifiedUtf8());
+          }
+        }
+        saw_dexcache = true;
+      }
+    });
+    ASSERT_TRUE(saw_dexcache);
+    // Everything in the preresolved array should also be in the intern table.
+    for (const std::string& str : preresolved_seen) {
+      EXPECT_TRUE(seen.find(str) != seen.end());
+    }
+    // Normal methods
+    EXPECT_TRUE(preresolved_seen.find("Loading ") != preresolved_seen.end());
+    EXPECT_TRUE(preresolved_seen.find("Starting up") != preresolved_seen.end());
+    EXPECT_TRUE(preresolved_seen.find("abcd.apk") != preresolved_seen.end());
+    EXPECT_TRUE(seen.find("Unexpected error") == seen.end());
+    EXPECT_TRUE(seen.find("Shutting down!") == seen.end());
+    EXPECT_TRUE(preresolved_seen.find("Unexpected error") == preresolved_seen.end());
+    EXPECT_TRUE(preresolved_seen.find("Shutting down!") == preresolved_seen.end());
+    // Classes initializers
+    EXPECT_TRUE(preresolved_seen.find("Startup init") != preresolved_seen.end());
+    EXPECT_TRUE(seen.find("Other class init") == seen.end());
+    EXPECT_TRUE(preresolved_seen.find("Other class init") == preresolved_seen.end());
+    // Expect the sets match.
+    EXPECT_GE(seen.size(), preresolved_seen.size());
+
+    // Verify what strings are marked as boot image.
+    std::set<std::string> boot_image_strings;
+    std::set<std::string> app_image_strings;
+
+    MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+    intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      boot_image_strings.insert(root.Read()->ToModifiedUtf8());
+    }, /*visit_boot_images=*/true, /*visit_non_boot_images=*/false);
+    intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      app_image_strings.insert(root.Read()->ToModifiedUtf8());
+    }, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true);
+    EXPECT_EQ(boot_image_strings.size(), 0u);
+    EXPECT_TRUE(app_image_strings == seen);
+  }
+}
+
+
 TEST_F(Dex2oatClassLoaderContextTest, StoredClassLoaderContext) {
   std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
   const std::string out_dir = GetScratchDir();
@@ -2104,27 +2294,61 @@
   }
   expected_stored_context +=    + "]";
   // The class path should not be valid and should fail being stored.
-  GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
-                      odex_location,
-                      CompilerFilter::Filter::kQuicken,
-                      { "--class-loader-context=" + stored_context },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [&](const OatFile& oat_file) {
+  EXPECT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+                                  odex_location,
+                                  CompilerFilter::Filter::kQuicken,
+                                  { "--class-loader-context=" + stored_context },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [&](const OatFile& oat_file) {
     EXPECT_NE(oat_file.GetClassLoaderContext(), stored_context) << output_;
     EXPECT_NE(oat_file.GetClassLoaderContext(), valid_context) << output_;
-  });
+  }));
   // The stored context should match what we expect even though it's invalid.
-  GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
-                      odex_location,
-                      CompilerFilter::Filter::kQuicken,
-                      { "--class-loader-context=" + valid_context,
-                        "--stored-class-loader-context=" + stored_context },
-                      true,  // expect_success
-                      false,  // use_fd
-                      [&](const OatFile& oat_file) {
+  EXPECT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+                                  odex_location,
+                                  CompilerFilter::Filter::kQuicken,
+                                  { "--class-loader-context=" + valid_context,
+                                    "--stored-class-loader-context=" + stored_context },
+                                  true,  // expect_success
+                                  false,  // use_fd
+                                  [&](const OatFile& oat_file) {
     EXPECT_EQ(oat_file.GetClassLoaderContext(), expected_stored_context) << output_;
-  });
+  }));
+}
+
+class Dex2oatISAFeaturesRuntimeDetectionTest : public Dex2oatTest {
+ protected:
+  void RunTest(const std::vector<std::string>& extra_args = {}) {
+    std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar";
+    std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex";
+
+    Copy(GetTestDexFileName(), dex_location);
+
+    ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                    odex_location,
+                                    CompilerFilter::kSpeed,
+                                    extra_args));
+  }
+
+  std::string GetTestDexFileName() {
+    return GetDexSrc1();
+  }
+};
+
+TEST_F(Dex2oatISAFeaturesRuntimeDetectionTest, TestCurrentRuntimeFeaturesAsDex2OatArguments) {
+  std::vector<std::string> argv;
+  Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+  auto option_pos =
+      std::find(std::begin(argv), std::end(argv), "--instruction-set-features=runtime");
+  if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+    EXPECT_TRUE(kIsTargetBuild);
+    EXPECT_NE(option_pos, std::end(argv));
+  } else {
+    EXPECT_EQ(option_pos, std::end(argv));
+  }
+
+  RunTest();
 }
 
 }  // namespace art
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
index 3d7277a..d8cbbaf 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
+++ b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
@@ -18,6 +18,7 @@
 
 #include "arch/arm/instruction_set_features_arm.h"
 #include "base/casts.h"
+#include "driver/compiler_options.h"
 #include "linker/relative_patcher_test.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
@@ -196,8 +197,8 @@
                                     /*out*/ std::string* debug_name = nullptr) {
     OptimizingUnitTestHelper helper;
     HGraph* graph = helper.CreateGraph();
-    std::string error_msg;
-    arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options_);
+    CompilerOptions compiler_options;
+    arm::CodeGeneratorARMVIXL codegen(graph, compiler_options);
     ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
     codegen.EmitThunkCode(patch, &code, debug_name);
     return std::vector<uint8_t>(code.begin(), code.end());
@@ -827,26 +828,38 @@
   }
 }
 
-#define TEST_BAKER_FIELD_WIDE(offset, ref_reg)    \
-  TEST_F(Thumb2RelativePatcherTest,               \
-    BakerOffsetWide##offset##_##ref_reg) {        \
-    TestBakerFieldWide(offset, ref_reg);          \
+TEST_F(Thumb2RelativePatcherTest, BakerOffsetWide) {
+  struct TestCase {
+    uint32_t offset;
+    uint32_t ref_reg;
+  };
+  static const TestCase test_cases[] = {
+      { 0u, 0u },
+      { 8u, 3u },
+      { 28u, 7u },
+      { 0xffcu, 11u },
+  };
+  for (const TestCase& test_case : test_cases) {
+    Reset();
+    TestBakerFieldWide(test_case.offset, test_case.ref_reg);
   }
+}
 
-TEST_BAKER_FIELD_WIDE(/* offset */ 0, /* ref_reg */ 0)
-TEST_BAKER_FIELD_WIDE(/* offset */ 8, /* ref_reg */ 3)
-TEST_BAKER_FIELD_WIDE(/* offset */ 28, /* ref_reg */ 7)
-TEST_BAKER_FIELD_WIDE(/* offset */ 0xffc, /* ref_reg */ 11)
-
-#define TEST_BAKER_FIELD_NARROW(offset, ref_reg)  \
-  TEST_F(Thumb2RelativePatcherTest,               \
-    BakerOffsetNarrow##offset##_##ref_reg) {      \
-    TestBakerFieldNarrow(offset, ref_reg);        \
+TEST_F(Thumb2RelativePatcherTest, BakerOffsetNarrow) {
+  struct TestCase {
+    uint32_t offset;
+    uint32_t ref_reg;
+  };
+  static const TestCase test_cases[] = {
+      { 0, 0u },
+      { 8, 3u },
+      { 28, 7u },
+  };
+  for (const TestCase& test_case : test_cases) {
+    Reset();
+    TestBakerFieldNarrow(test_case.offset, test_case.ref_reg);
   }
-
-TEST_BAKER_FIELD_NARROW(/* offset */ 0, /* ref_reg */ 0)
-TEST_BAKER_FIELD_NARROW(/* offset */ 8, /* ref_reg */ 3)
-TEST_BAKER_FIELD_NARROW(/* offset */ 28, /* ref_reg */ 7)
+}
 
 TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) {
   // One thunk in the middle with maximum distance branches to it from both sides.
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 9e3bb97..678574be2 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -18,6 +18,7 @@
 
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "base/casts.h"
+#include "driver/compiler_options.h"
 #include "linker/relative_patcher_test.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
@@ -175,8 +176,15 @@
                                     /*out*/ std::string* debug_name = nullptr) {
     OptimizingUnitTestHelper helper;
     HGraph* graph = helper.CreateGraph();
-    std::string error_msg;
-    arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+    CompilerOptions compiler_options;
+
+    // Set isa to arm64.
+    compiler_options.instruction_set_ = instruction_set_;
+    compiler_options.instruction_set_features_ =
+        InstructionSetFeatures::FromBitmap(instruction_set_, instruction_set_features_->AsBitmap());
+    CHECK(compiler_options.instruction_set_features_->Equals(instruction_set_features_.get()));
+
+    arm64::CodeGeneratorARM64 codegen(graph, compiler_options);
     ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
     codegen.EmitThunkCode(patch, &code, debug_name);
     return std::vector<uint8_t>(code.begin(), code.end());
@@ -567,11 +575,6 @@
   Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
 };
 
-class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
- public:
-  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
-};
-
 TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
   const LinkerPatch patches[] = {
       LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
@@ -783,174 +786,221 @@
   EXPECT_TRUE(CheckThunk(thunk_offset));
 }
 
-TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry1) {
-  TestNopsAdrpLdr(0u, 0x12345678u, 0x1234u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry2) {
-  TestNopsAdrpLdr(0u, -0x12345678u, 0x4444u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry3) {
-  TestNopsAdrpLdr(0u, 0x12345000u, 0x3ffcu);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry4) {
-  TestNopsAdrpLdr(0u, 0x12345000u, 0x4000u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringReference1) {
-  TestNopsAdrpAdd(0u, 0x12345678u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringReference2) {
-  TestNopsAdrpAdd(0u, -0x12345678u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringReference3) {
-  TestNopsAdrpAdd(0u, 0x12345000u);
-}
-
-TEST_F(Arm64RelativePatcherTestDefault, StringReference4) {
-  TestNopsAdrpAdd(0u, 0x12345ffcu);
-}
-
-#define TEST_FOR_OFFSETS(test, disp1, disp2) \
-  test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \
-  test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2)
-
-#define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## Ldur ## disp) { \
-    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
-    TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry) {
+  struct TestCase {
+    uint32_t bss_begin;
+    uint32_t string_entry_offset;
+  };
+  static const TestCase test_cases[] = {
+      { 0x12345678u, 0x1234u },
+      { -0x12345678u, 0x4444u },
+      { 0x12345000u, 0x3ffcu },
+      { 0x12345000u, 0x4000u },
+  };
+  for (const TestCase& test_case : test_cases) {
+    Reset();
+    TestNopsAdrpLdr(/*num_nops=*/ 0u, test_case.bss_begin, test_case.string_entry_offset);
   }
+}
 
-TEST_FOR_OFFSETS(DEFAULT_LDUR_LDR_TEST, 0x1234, 0x1238)
-
-#define DENVER64_LDUR_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDenver64, StringBssEntry ## adrp_offset ## Ldur ## disp) { \
-    TestAdrpLdurLdr(adrp_offset, false, 0x12345678u, disp); \
+TEST_F(Arm64RelativePatcherTestDefault, StringReference) {
+  for (uint32_t string_offset : { 0x12345678u, -0x12345678u, 0x12345000u, 0x12345ffcu}) {
+    Reset();
+    TestNopsAdrpAdd(/*num_nops=*/ 0u, string_offset);
   }
+}
 
-TEST_FOR_OFFSETS(DENVER64_LDUR_LDR_TEST, 0x1234, 0x1238)
+template <typename Test>
+void TestForAdrpOffsets(Test test, std::initializer_list<uint32_t> args) {
+  for (uint32_t adrp_offset : { 0xff4u, 0xff8u, 0xffcu, 0x1000u }) {
+    for (uint32_t arg : args) {
+      test(adrp_offset, arg);
+    }
+  }
+}
+
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryLdur) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_entry_offset) {
+        Reset();
+        bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu);
+        TestAdrpLdurLdr(adrp_offset, has_thunk, /*bss_begin=*/ 0x12345678u, string_entry_offset);
+      },
+      { 0x1234u, 0x1238u });
+}
 
 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
-#define LDRW_PCREL_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## WPcRel ## disp) { \
-    TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \
-  }
-
-TEST_FOR_OFFSETS(LDRW_PCREL_LDR_TEST, 0x1234, 0x1238)
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryWPcRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t pcrel_disp) {
+        Reset();
+        TestAdrpLdrPcRelLdr(kLdrWPcRelInsn,
+                            pcrel_disp,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*bss_begin=*/ 0x12345678u,
+                            /*string_entry_offset=*/ 0x1234u);
+      },
+      { 0x1234u, 0x1238u });
+}
 
 // LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
-#define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## XPcRel ## disp) { \
-    bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
-    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
-    TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
-  }
-
-TEST_FOR_OFFSETS(LDRX_PCREL_LDR_TEST, 0x1234, 0x1238)
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryXPcRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t pcrel_disp) {
+        Reset();
+        bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(pcrel_disp));
+        bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned;
+        TestAdrpLdrPcRelLdr(kLdrXPcRelInsn,
+                            pcrel_disp,
+                            adrp_offset,
+                            has_thunk,
+                            /*bss_begin=*/ 0x12345678u,
+                            /*string_entry_offset=*/ 0x1234u);
+      },
+      { 0x1234u, 0x1238u });
+}
 
 // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
-#define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## WSpRel ## disp) { \
-    TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
-  }
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryWSpRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t disp) {
+        Reset();
+        TestAdrpLdrSpRelLdr(kLdrWSpRelInsn,
+                            /*sprel_disp_in_load_units=*/ disp >> 2,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*bss_begin=*/ 0x12345678u,
+                            /*string_entry_offset=*/ 0x1234u);
+      },
+      { 0u, 4u });
+}
 
-TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4)
+TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryXSpRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t disp) {
+        Reset();
+        TestAdrpLdrSpRelLdr(kLdrXSpRelInsn,
+                            /*sprel_disp_in_load_units=*/ (disp) >> 3,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*bss_begin=*/ 0x12345678u,
+                            /*string_entry_offset=*/ 0x1234u);
+      },
+      { 0u, 8u });
+}
 
-#define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## XSpRel ## disp) { \
-    TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
-  }
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceLdur) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_offset) {
+        Reset();
+        bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu);
+        TestAdrpLdurAdd(adrp_offset, has_thunk, string_offset);
+      },
+      { 0x12345678u, 0xffffc840u });
+}
 
-TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceSubX3X2) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_offset) {
+        Reset();
+        /* SUB unrelated to "ADRP x0, addr". */
+        uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u;  /* SUB x3, x2, #100 */
+        TestAdrpInsn2Add(sub, adrp_offset, /*has_thunk=*/ false, string_offset);
+      },
+      { 0x12345678u, 0xffffc840u });
+}
 
-#define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \
-    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
-    TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \
-  }
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceSubsX3X0) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_offset) {
+        Reset();
+        /* SUBS that uses the result of "ADRP x0, addr". */
+        uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u;  /* SUBS x3, x0, #100 */
+        TestAdrpInsn2Add(subs, adrp_offset, /*has_thunk=*/ false, string_offset);
+      },
+      { 0x12345678u, 0xffffc840u });
+}
 
-TEST_FOR_OFFSETS(DEFAULT_LDUR_ADD_TEST, 0x12345678, 0xffffc840)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceAddX0X0) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_offset) {
+        Reset();
+        /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. */
+        uint32_t add = kSubXInsn | (100 << 10) | (0u << 5) | 0u;  /* ADD x0, x0, #100 */
+        TestAdrpInsn2Add(add, adrp_offset, /*has_thunk=*/ false, string_offset);
+      },
+      { 0x12345678u, 0xffffc840u });
+}
 
-#define DENVER64_LDUR_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDenver64, StringReference ## adrp_offset ## Ldur ## disp) { \
-    TestAdrpLdurAdd(adrp_offset, false, disp); \
-  }
-
-TEST_FOR_OFFSETS(DENVER64_LDUR_ADD_TEST, 0x12345678, 0xffffc840)
-
-#define DEFAULT_SUBX3X2_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubX3X2 ## disp) { \
-    /* SUB unrelated to "ADRP x0, addr". */ \
-    uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u;  /* SUB x3, x2, #100 */ \
-    TestAdrpInsn2Add(sub, adrp_offset, false, disp); \
-  }
-
-TEST_FOR_OFFSETS(DEFAULT_SUBX3X2_ADD_TEST, 0x12345678, 0xffffc840)
-
-#define DEFAULT_SUBSX3X0_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubsX3X0 ## disp) { \
-    /* SUBS that uses the result of "ADRP x0, addr". */ \
-    uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u;  /* SUBS x3, x0, #100 */ \
-    TestAdrpInsn2Add(subs, adrp_offset, false, disp); \
-  }
-
-TEST_FOR_OFFSETS(DEFAULT_SUBSX3X0_ADD_TEST, 0x12345678, 0xffffc840)
-
-#define DEFAULT_ADDX0X0_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddX0X0 ## disp) { \
-    /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. */ \
-    uint32_t add = kSubXInsn | (100 << 10) | (0u << 5) | 0u;  /* ADD x0, x0, #100 */ \
-    TestAdrpInsn2Add(add, adrp_offset, false, disp); \
-  }
-
-TEST_FOR_OFFSETS(DEFAULT_ADDX0X0_ADD_TEST, 0x12345678, 0xffffc840)
-
-#define DEFAULT_ADDSX0X2_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \
-    /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \
-    uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u;  /* ADDS x0, x2, #100 */ \
-    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
-    TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \
-  }
-
-TEST_FOR_OFFSETS(DEFAULT_ADDSX0X2_ADD_TEST, 0x12345678, 0xffffc840)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceAddsX0X2) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t string_offset) {
+        Reset();
+        /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */
+        uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u;  /* ADDS x0, x2, #100 */
+        bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu);
+        TestAdrpInsn2Add(adds, adrp_offset, has_thunk, string_offset);
+      },
+      { 0x12345678u, 0xffffc840u });
+}
 
 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
-#define LDRW_PCREL_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WPcRel ## disp) { \
-    TestAdrpLdrPcRelAdd(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u); \
-  }
-
-TEST_FOR_OFFSETS(LDRW_PCREL_ADD_TEST, 0x1234, 0x1238)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceWPcRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t pcrel_disp) {
+        Reset();
+        TestAdrpLdrPcRelAdd(kLdrWPcRelInsn,
+                            pcrel_disp,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*string_offset=*/ 0x12345678u);
+      },
+      { 0x1234u, 0x1238u });
+}
 
 // LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
-#define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \
-    bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
-    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
-    TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \
-  }
-
-TEST_FOR_OFFSETS(LDRX_PCREL_ADD_TEST, 0x1234, 0x1238)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceXPcRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t pcrel_disp) {
+        Reset();
+        bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(pcrel_disp));
+        bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned;
+        TestAdrpLdrPcRelAdd(kLdrXPcRelInsn,
+                            pcrel_disp,
+                            adrp_offset,
+                            has_thunk,
+                            /*string_offset=*/ 0x12345678u);
+      },
+      { 0x1234u, 0x1238u });
+}
 
 // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
-#define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \
-    TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u); \
-  }
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceWSpRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t disp) {
+        Reset();
+        TestAdrpLdrSpRelAdd(kLdrWSpRelInsn,
+                            /*sprel_disp_in_load_units=*/ (disp) >> 2,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*string_offset=*/ 0x12345678u);
+      },
+      { 0u, 4u });
+}
 
-TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)
-
-#define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \
-  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \
-    TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u); \
-  }
-
-TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)
+TEST_F(Arm64RelativePatcherTestDefault, StringReferenceXSpRel) {
+  TestForAdrpOffsets(
+      [&](uint32_t adrp_offset, uint32_t disp) {
+        Reset();
+        TestAdrpLdrSpRelAdd(kLdrXSpRelInsn,
+                            /*sprel_disp_in_load_units=*/ (disp) >> 3,
+                            adrp_offset,
+                            /*has_thunk=*/ false,
+                            /*string_offset=*/ 0x12345678u);
+      },
+      { 0u, 8u });
+}
 
 void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg) {
   uint32_t valid_regs[] = {
@@ -1039,15 +1089,22 @@
   }
 }
 
-#define TEST_BAKER_FIELD(offset, ref_reg)     \
-  TEST_F(Arm64RelativePatcherTestDefault,     \
-    BakerOffset##offset##_##ref_reg) {        \
-    TestBakerField(offset, ref_reg);          \
+TEST_F(Arm64RelativePatcherTestDefault, BakerOffset) {
+  struct TestCase {
+    uint32_t offset;
+    uint32_t ref_reg;
+  };
+  static const TestCase test_cases[] = {
+      { 0u, 0u },
+      { 8u, 15u },
+      { 0x3ffcu, 29u },
+  };
+  for (const TestCase& test_case : test_cases) {
+    Reset();
+    TestBakerField(test_case.offset, test_case.ref_reg);
   }
+}
 
-TEST_BAKER_FIELD(/* offset */ 0, /* ref_reg */ 0)
-TEST_BAKER_FIELD(/* offset */ 8, /* ref_reg */ 15)
-TEST_BAKER_FIELD(/* offset */ 0x3ffc, /* ref_reg */ 29)
 
 TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
   // One thunk in the middle with maximum distance branches to it from both sides.
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 852293b..b3e8290 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -17,6 +17,7 @@
 #include "elf_writer_quick.h"
 
 #include <openssl/sha.h>
+#include <memory>
 #include <unordered_map>
 #include <unordered_set>
 
@@ -68,7 +69,7 @@
         debug_info_(debug_info) {
   }
 
-  void Run(Thread*) {
+  void Run(Thread*) override {
     result_ = debug::MakeMiniDebugInfo(isa_,
                                        instruction_set_features_,
                                        text_section_address_,
@@ -260,19 +261,18 @@
 
 template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_info) {
-  if (!debug_info.Empty() && compiler_options_.GetGenerateMiniDebugInfo()) {
+  if (compiler_options_.GetGenerateMiniDebugInfo()) {
     // Prepare the mini-debug-info in background while we do other I/O.
     Thread* self = Thread::Current();
-    debug_info_task_ = std::unique_ptr<DebugInfoTask>(
-        new DebugInfoTask(builder_->GetIsa(),
-                          compiler_options_.GetInstructionSetFeatures(),
-                          builder_->GetText()->GetAddress(),
-                          text_size_,
-                          builder_->GetDex()->Exists() ? builder_->GetDex()->GetAddress() : 0,
-                          dex_section_size_,
-                          debug_info));
-    debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
-        new ThreadPool("Mini-debug-info writer", 1));
+    debug_info_task_ = std::make_unique<DebugInfoTask>(
+        builder_->GetIsa(),
+        compiler_options_.GetInstructionSetFeatures(),
+        builder_->GetText()->GetAddress(),
+        text_size_,
+        builder_->GetDex()->Exists() ? builder_->GetDex()->GetAddress() : 0,
+        dex_section_size_,
+        debug_info);
+    debug_info_thread_pool_ = std::make_unique<ThreadPool>("Mini-debug-info writer", 1);
     debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
     debug_info_thread_pool_->StartWorkers(self);
   }
@@ -280,19 +280,17 @@
 
 template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info) {
-  if (!debug_info.Empty()) {
-    if (compiler_options_.GetGenerateMiniDebugInfo()) {
-      // Wait for the mini-debug-info generation to finish and write it to disk.
-      Thread* self = Thread::Current();
-      DCHECK(debug_info_thread_pool_ != nullptr);
-      debug_info_thread_pool_->Wait(self, true, false);
-      builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult());
-    }
-    // The Strip method expects debug info to be last (mini-debug-info is not stripped).
-    if (compiler_options_.GetGenerateDebugInfo()) {
-      // Generate all the debug information we can.
-      debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
-    }
+  if (compiler_options_.GetGenerateMiniDebugInfo()) {
+    // Wait for the mini-debug-info generation to finish and write it to disk.
+    Thread* self = Thread::Current();
+    DCHECK(debug_info_thread_pool_ != nullptr);
+    debug_info_thread_pool_->Wait(self, true, false);
+    builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult());
+  }
+  // The Strip method expects debug info to be last (mini-debug-info is not stripped).
+  if (!debug_info.Empty() && compiler_options_.GetGenerateDebugInfo()) {
+    // Generate all the debug information we can.
+    debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
   }
 }
 
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 40495f3..b381765 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -34,7 +34,7 @@
 
 class ElfWriterTest : public CommonCompilerTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     ReserveImageSpace();
     CommonCompilerTest::SetUp();
   }
@@ -68,9 +68,9 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              /* writable */ false,
-                                              /* program_header_only */ false,
-                                              /*low_4gb*/false,
+                                              /*writable=*/ false,
+                                              /*program_header_only=*/ false,
+                                              /*low_4gb=*/false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
@@ -80,9 +80,9 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              /* writable */ false,
-                                              /* program_header_only */ false,
-                                              /* low_4gb */ false,
+                                              /*writable=*/ false,
+                                              /*program_header_only=*/ false,
+                                              /*low_4gb=*/ false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
@@ -92,24 +92,23 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              /* writable */ false,
-                                              /* program_header_only */ true,
-                                              /* low_4gb */ false,
+                                              /*writable=*/ false,
+                                              /*program_header_only=*/ true,
+                                              /*low_4gb=*/ false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     size_t size;
     bool success = ef->GetLoadedSize(&size, &error_msg);
     CHECK(success) << error_msg;
     MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
-                                              /* addr */ nullptr,
                                               RoundUp(size, kPageSize),
                                               PROT_NONE,
-                                              /* low_4gb */ true,
+                                              /*low_4gb=*/ true,
                                               &error_msg);
     CHECK(reservation.IsValid()) << error_msg;
     uint8_t* base = reservation.Begin();
     success =
-        ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg);
+        ef->Load(file.get(), /*executable=*/ false, /*low_4gb=*/ false, &reservation, &error_msg);
     CHECK(success) << error_msg;
     CHECK(!reservation.IsValid());
     EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base),
@@ -131,9 +130,9 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              /* writable */ false,
-                                              /* program_header_only */ false,
-                                              /* low_4gb */ false,
+                                              /*writable=*/ false,
+                                              /*program_header_only=*/ false,
+                                              /*low_4gb=*/ false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
@@ -164,7 +163,7 @@
     // Patch manually.
     std::vector<uint8_t> expected = initial_data;
     for (uintptr_t location : patch_locations) {
-      typedef __attribute__((__aligned__(1))) uint32_t UnalignedAddress;
+      using UnalignedAddress __attribute__((__aligned__(1))) = uint32_t;
       *reinterpret_cast<UnalignedAddress*>(expected.data() + location) += delta;
     }
 
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index 96c48b8..1a5701d 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -32,7 +32,11 @@
   // Compile multi-image with ImageLayoutA being the last image.
   {
     CompilationHelper helper;
-    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", {"LMyClass;"});
+    Compile(ImageHeader::kStorageModeUncompressed,
+            /*max_image_block_size=*/std::numeric_limits<uint32_t>::max(),
+            helper,
+            "ImageLayoutA",
+            {"LMyClass;"});
     image_sizes = helper.GetImageObjectSectionSizes();
   }
   TearDown();
@@ -41,7 +45,11 @@
   // Compile multi-image with ImageLayoutB being the last image.
   {
     CompilationHelper helper;
-    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", {"LMyClass;"});
+    Compile(ImageHeader::kStorageModeUncompressed,
+            /*max_image_block_size=*/std::numeric_limits<uint32_t>::max(),
+            helper,
+            "ImageLayoutB",
+            {"LMyClass;"});
     image_sizes_extra = helper.GetImageObjectSectionSizes();
   }
   // Make sure that the new stuff in the clinit in ImageLayoutB is in the last image and not in the
@@ -65,7 +73,10 @@
     uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB);
     uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB);
     ImageSection sections[ImageHeader::kSectionCount];
-    ImageHeader image_header(image_begin,
+    uint32_t image_reservation_size = RoundUp(oat_file_end - image_begin, kPageSize);
+    ImageHeader image_header(image_reservation_size,
+                             /*component_count=*/ 1u,
+                             image_begin,
                              image_size_,
                              sections,
                              image_roots,
@@ -74,15 +85,10 @@
                              oat_data_begin,
                              oat_data_end,
                              oat_file_end,
-                             /*boot_image_begin*/0U,
-                             /*boot_image_size*/0U,
-                             /*boot_oat_begin*/0U,
-                             /*boot_oat_size_*/0U,
-                             sizeof(void*),
-                             /*compile_pic*/false,
-                             /*is_pic*/false,
-                             ImageHeader::kDefaultStorageMode,
-                             /*data_size*/0u);
+                             /*boot_image_begin=*/ 0u,
+                             /*boot_image_size=*/ 0u,
+                             sizeof(void*));
+
     ASSERT_TRUE(image_header.IsValid());
     ASSERT_TRUE(!image_header.IsAppImage());
 
@@ -101,9 +107,10 @@
 TEST_F(ImageTest, TestDefaultMethods) {
   CompilationHelper helper;
   Compile(ImageHeader::kStorageModeUncompressed,
-      helper,
-      "DefaultMethods",
-      {"LIface;", "LImpl;", "LIterableBase;"});
+          /*max_image_block_size=*/std::numeric_limits<uint32_t>::max(),
+          helper,
+          "DefaultMethods",
+          {"LIface;", "LImpl;", "LIterableBase;"});
 
   PointerSize pointer_size = class_linker_->GetImagePointerSize();
   Thread* self = Thread::Current();
@@ -112,7 +119,7 @@
   // Test the pointer to quick code is the same in origin method
   // and in the copied method form the same oat file.
   ObjPtr<mirror::Class> iface_klass =
-      class_linker_->LookupClass(self, "LIface;", /* class_loader */ nullptr);
+      class_linker_->LookupClass(self, "LIface;", /*class_loader=*/ nullptr);
   ASSERT_NE(nullptr, iface_klass);
   ArtMethod* origin = iface_klass->FindInterfaceMethod("defaultMethod", "()V", pointer_size);
   ASSERT_NE(nullptr, origin);
@@ -122,7 +129,7 @@
   ASSERT_NE(nullptr, code);
   ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
   ObjPtr<mirror::Class> impl_klass =
-      class_linker_->LookupClass(self, "LImpl;", /* class_loader */ nullptr);
+      class_linker_->LookupClass(self, "LImpl;", /*class_loader=*/ nullptr);
   ASSERT_NE(nullptr, impl_klass);
   ArtMethod* copied = FindCopiedMethod(origin, impl_klass);
   ASSERT_NE(nullptr, copied);
@@ -133,7 +140,7 @@
   // but the copied method has pointer to interpreter
   // because these methods are in different oat files.
   ObjPtr<mirror::Class> iterable_klass =
-      class_linker_->LookupClass(self, "Ljava/lang/Iterable;", /* class_loader */ nullptr);
+      class_linker_->LookupClass(self, "Ljava/lang/Iterable;", /*class_loader=*/ nullptr);
   ASSERT_NE(nullptr, iterable_klass);
   origin = iterable_klass->FindClassMethod(
       "forEach", "(Ljava/util/function/Consumer;)V", pointer_size);
@@ -145,7 +152,7 @@
   ASSERT_NE(nullptr, code);
   ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
   ObjPtr<mirror::Class> iterablebase_klass =
-      class_linker_->LookupClass(self, "LIterableBase;", /* class_loader */ nullptr);
+      class_linker_->LookupClass(self, "LIterableBase;", /*class_loader=*/ nullptr);
   ASSERT_NE(nullptr, iterablebase_klass);
   copied = FindCopiedMethod(origin, iterablebase_klass);
   ASSERT_NE(nullptr, copied);
@@ -154,5 +161,18 @@
   ASSERT_TRUE(class_linker_->IsQuickToInterpreterBridge(code));
 }
 
+// Regression test for dex2oat crash for soft verification failure during
+// class initialization check from the transactional interpreter while
+// running the class initializer for another class.
+TEST_F(ImageTest, TestSoftVerificationFailureDuringClassInitialization) {
+  CompilationHelper helper;
+  Compile(ImageHeader::kStorageModeUncompressed,
+          /*max_image_block_size=*/std::numeric_limits<uint32_t>::max(),
+          helper,
+          "VerifySoftFailDuringClinit",
+          /*image_classes=*/ {"LClassToInitialize;"},
+          /*image_classes_failing_aot_clinit=*/ {"LClassToInitialize;"});
+}
+
 }  // namespace linker
 }  // namespace art
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 2b6786d..8c9dfb8 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -28,6 +28,7 @@
 #include "art_method-inl.h"
 #include "base/file_utils.h"
 #include "base/hash_set.h"
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "class_linker-inl.h"
@@ -35,6 +36,7 @@
 #include "compiler_callbacks.h"
 #include "debug/method_debug_info.h"
 #include "dex/quick_compiler_callbacks.h"
+#include "dex/signature-inl.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
 #include "gc/space/image_space.h"
@@ -76,12 +78,14 @@
     CommonCompilerTest::SetUp();
   }
 
-  void TestWriteRead(ImageHeader::StorageMode storage_mode);
+  void TestWriteRead(ImageHeader::StorageMode storage_mode, uint32_t max_image_block_size);
 
   void Compile(ImageHeader::StorageMode storage_mode,
+               uint32_t max_image_block_size,
                /*out*/ CompilationHelper& out_helper,
                const std::string& extra_dex = "",
-               const std::initializer_list<std::string>& image_classes = {});
+               const std::initializer_list<std::string>& image_classes = {},
+               const std::initializer_list<std::string>& image_classes_failing_aot_clinit = {});
 
   void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonCompilerTest::SetUpRuntimeOptions(options);
@@ -166,10 +170,11 @@
   {
     // Create a generic tmp file, to be the base of the .art and .oat temporary files.
     ScratchFile location;
-    for (int i = 0; i < static_cast<int>(class_path.size()); ++i) {
-      std::string cur_location =
-          android::base::StringPrintf("%s-%d.art", location.GetFilename().c_str(), i);
-      out_helper.image_locations.push_back(ScratchFile(cur_location));
+    std::vector<std::string> image_locations =
+        gc::space::ImageSpace::ExpandMultiImageLocations(out_helper.dex_file_locations,
+                                                         location.GetFilename() + ".art");
+    for (size_t i = 0u; i != class_path.size(); ++i) {
+      out_helper.image_locations.push_back(ScratchFile(image_locations[i]));
     }
   }
   std::vector<std::string> image_filenames;
@@ -198,14 +203,6 @@
   }
 
   std::unordered_map<const DexFile*, size_t> dex_file_to_oat_index_map;
-  std::vector<const char*> oat_filename_vector;
-  for (const std::string& file : oat_filenames) {
-    oat_filename_vector.push_back(file.c_str());
-  }
-  std::vector<const char*> image_filename_vector;
-  for (const std::string& file : image_filenames) {
-    image_filename_vector.push_back(file.c_str());
-  }
   size_t image_idx = 0;
   for (const DexFile* dex_file : class_path) {
     dex_file_to_oat_index_map.emplace(dex_file, image_idx);
@@ -214,31 +211,21 @@
   // TODO: compile_pic should be a test argument.
   std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_options_,
                                                       kRequestedImageBase,
-                                                      /*compile_pic*/false,
-                                                      /*compile_app_image*/false,
                                                       storage_mode,
-                                                      oat_filename_vector,
+                                                      oat_filenames,
                                                       dex_file_to_oat_index_map,
-                                                      /*dirty_image_objects*/nullptr));
+                                                      /*class_loader=*/ nullptr,
+                                                      /*dirty_image_objects=*/ nullptr));
   {
     {
       jobject class_loader = nullptr;
       TimingLogger timings("ImageTest::WriteRead", false, false);
-      TimingLogger::ScopedTiming t("CompileAll", &timings);
-      SetDexFilesForOatFile(class_path);
-      driver->CompileAll(class_loader, class_path, &timings);
+      CompileAll(class_loader, class_path, &timings);
 
-      t.NewTiming("WriteElf");
+      TimingLogger::ScopedTiming t("WriteElf", &timings);
       SafeMap<std::string, std::string> key_value_store;
-      std::vector<const char*> dex_filename_vector;
-      for (size_t i = 0; i < class_path.size(); ++i) {
-        dex_filename_vector.push_back("");
-      }
       key_value_store.Put(OatHeader::kBootClassPathKey,
-                          gc::space::ImageSpace::GetMultiImageBootClassPath(
-                              dex_filename_vector,
-                              oat_filename_vector,
-                              image_filename_vector));
+                          android::base::Join(out_helper.dex_file_locations, ':'));
 
       std::vector<std::unique_ptr<ElfWriter>> elf_writers;
       std::vector<std::unique_ptr<OatWriter>> oat_writers;
@@ -270,7 +257,7 @@
         bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
             out_helper.vdex_files[i].GetFile(),
             rodata.back(),
-            &key_value_store,
+            (i == 0u) ? &key_value_store : nullptr,
             /* verify */ false,           // Dex files may be dex-to-dex-ed, don't verify.
             /* update_input_vdex */ false,
             /* copy_dex_files */ CopyOption::kOnlyIfCompressed,
@@ -340,8 +327,7 @@
           elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
         }
 
-        bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
-                                                 /* image_file_location_oat_checksum */ 0u);
+        bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream());
         ASSERT_TRUE(header_ok);
 
         writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
@@ -355,8 +341,8 @@
     }
 
     bool success_image = writer->Write(kInvalidFd,
-                                       image_filename_vector,
-                                       oat_filename_vector);
+                                       image_filenames,
+                                       oat_filenames);
     ASSERT_TRUE(success_image);
 
     for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) {
@@ -371,10 +357,16 @@
   }
 }
 
-inline void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
-                        CompilationHelper& helper,
-                        const std::string& extra_dex,
-                        const std::initializer_list<std::string>& image_classes) {
+inline void ImageTest::Compile(
+    ImageHeader::StorageMode storage_mode,
+    uint32_t max_image_block_size,
+    CompilationHelper& helper,
+    const std::string& extra_dex,
+    const std::initializer_list<std::string>& image_classes,
+    const std::initializer_list<std::string>& image_classes_failing_aot_clinit) {
+  for (const std::string& image_class : image_classes_failing_aot_clinit) {
+    ASSERT_TRUE(ContainsElement(image_classes, image_class));
+  }
   for (const std::string& image_class : image_classes) {
     image_classes_.insert(image_class);
   }
@@ -382,6 +374,7 @@
   CreateCompilerDriver();
   // Set inline filter values.
   compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+  compiler_options_->SetMaxImageBlockSize(max_image_block_size);
   image_classes_.clear();
   if (!extra_dex.empty()) {
     helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str());
@@ -395,14 +388,20 @@
       ObjPtr<mirror::Class> klass =
           class_linker->FindSystemClass(Thread::Current(), image_class.c_str());
       EXPECT_TRUE(klass != nullptr);
-      EXPECT_TRUE(klass->IsInitialized());
+      EXPECT_TRUE(klass->IsResolved());
+      if (ContainsElement(image_classes_failing_aot_clinit, image_class)) {
+        EXPECT_FALSE(klass->IsInitialized());
+      } else {
+        EXPECT_TRUE(klass->IsInitialized());
+      }
     }
   }
 }
 
-inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
+inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode,
+                                     uint32_t max_image_block_size) {
   CompilationHelper helper;
-  Compile(storage_mode, /*out*/ helper);
+  Compile(storage_mode, max_image_block_size, /*out*/ helper);
   std::vector<uint64_t> image_file_sizes;
   for (ScratchFile& image_file : helper.image_files) {
     std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
@@ -439,6 +438,9 @@
   MemMap::Init();
 
   RuntimeOptions options;
+  options.emplace_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()), nullptr);
+  options.emplace_back(
+      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()), nullptr);
   std::string image("-Ximage:");
   image.append(helper.image_locations[0].GetFilename());
   options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
@@ -477,6 +479,10 @@
     } else if (image_file_size > 16 * KB) {
       // Compressed, file should be smaller than image. Not really valid for small images.
       ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
+      // TODO: Actually validate the blocks; this is hard since the blocks are not copied over for
+      // compressed images. Add kPageSize since image_size is rounded up to the page size.
+      ASSERT_GT(image_space->GetImageHeader().GetBlockCount() * max_image_block_size,
+                image_space->GetImageHeader().GetImageSize() - kPageSize);
     }
 
     image_space->VerifyImageAllocations();
@@ -487,7 +493,7 @@
       CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
     }
     for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
-      const DexFile::ClassDef& class_def = dex->GetClassDef(j);
+      const dex::ClassDef& class_def = dex->GetClassDef(j);
       const char* descriptor = dex->GetClassDescriptor(class_def);
       ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
       EXPECT_TRUE(klass != nullptr) << descriptor;
diff --git a/dex2oat/linker/image_write_read_test.cc b/dex2oat/linker/image_write_read_test.cc
index 30996b5..5ddbd09 100644
--- a/dex2oat/linker/image_write_read_test.cc
+++ b/dex2oat/linker/image_write_read_test.cc
@@ -20,15 +20,23 @@
 namespace linker {
 
 TEST_F(ImageTest, WriteReadUncompressed) {
-  TestWriteRead(ImageHeader::kStorageModeUncompressed);
+  TestWriteRead(ImageHeader::kStorageModeUncompressed,
+                /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
 }
 
 TEST_F(ImageTest, WriteReadLZ4) {
-  TestWriteRead(ImageHeader::kStorageModeLZ4);
+  TestWriteRead(ImageHeader::kStorageModeLZ4,
+                /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
 }
 
 TEST_F(ImageTest, WriteReadLZ4HC) {
-  TestWriteRead(ImageHeader::kStorageModeLZ4HC);
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC,
+                /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
+}
+
+
+TEST_F(ImageTest, WriteReadLZ4HCKBBlock) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC, /*max_image_block_size=*/KB);
 }
 
 }  // namespace linker
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 6a13454..15ced72 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -19,6 +19,7 @@
 #include <lz4.h>
 #include <lz4hc.h>
 #include <sys/stat.h>
+#include <zlib.h>
 
 #include <memory>
 #include <numeric>
@@ -27,11 +28,11 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
-#include "base/bit_memory_region.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/globals.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
@@ -48,11 +49,13 @@
 #include "gc/heap-visit-objects-inl.h"
 #include "gc/heap.h"
 #include "gc/space/large_object_space.h"
+#include "gc/space/region_space.h"
 #include "gc/space/space-inl.h"
 #include "gc/verification.h"
 #include "handle_scope-inl.h"
 #include "image.h"
 #include "imt_conflict_table.h"
+#include "intern_table-inl.h"
 #include "jni/jni_internal.h"
 #include "linear_alloc.h"
 #include "lock_word.h"
@@ -66,6 +69,7 @@
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "oat.h"
@@ -87,14 +91,6 @@
 namespace art {
 namespace linker {
 
-static inline size_t RelocationIndex(size_t relocation_offset, PointerSize target_ptr_size) {
-  static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
-                "Expecting heap GC roots and references to have the same size.");
-  DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(target_ptr_size));
-  DCHECK_ALIGNED(relocation_offset, sizeof(GcRoot<mirror::Object>));
-  return relocation_offset / sizeof(GcRoot<mirror::Object>);
-}
-
 static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
                                                  ImageHeader::StorageMode image_storage_mode,
                                                  /*out*/ std::vector<uint8_t>* storage) {
@@ -152,10 +148,37 @@
 // Separate objects into multiple bins to optimize dirty memory use.
 static constexpr bool kBinObjects = true;
 
+ObjPtr<mirror::ClassLoader> ImageWriter::GetAppClassLoader() const
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return compiler_options_.IsAppImage()
+      ? ObjPtr<mirror::ClassLoader>::DownCast(Thread::Current()->DecodeJObject(app_class_loader_))
+      : nullptr;
+}
+
+bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
+  // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+  if (compiler_options_.IsBootImage()) {
+    return true;
+  }
+  // Objects already in the boot image do not belong to the image being written.
+  if (IsInBootImage(obj.Ptr())) {
+    return false;
+  }
+  // DexCaches for the boot class path components that are not a part of the boot image
+  // cannot be garbage collected in PrepareImageAddressSpace() but we do not want to
+  // include them in the app image. So make sure we include only the app DexCaches.
+  if (obj->IsDexCache() &&
+      !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
+                       obj->AsDexCache()->GetDexFile())) {
+    return false;
+  }
+  return true;
+}
+
 // Return true if an object is already in an image space.
 bool ImageWriter::IsInBootImage(const void* obj) const {
   gc::Heap* const heap = Runtime::Current()->GetHeap();
-  if (!compile_app_image_) {
+  if (compiler_options_.IsBootImage()) {
     DCHECK(heap->GetBootImageSpaces().empty());
     return false;
   }
@@ -172,7 +195,7 @@
 
 bool ImageWriter::IsInBootOatFile(const void* ptr) const {
   gc::Heap* const heap = Runtime::Current()->GetHeap();
-  if (!compile_app_image_) {
+  if (compiler_options_.IsBootImage()) {
     DCHECK(heap->GetBootImageSpaces().empty());
     return false;
   }
@@ -200,54 +223,469 @@
 
 bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
   target_ptr_size_ = InstructionSetPointerSize(compiler_options_.GetInstructionSet());
+
+  Thread* const self = Thread::Current();
+
   gc::Heap* const heap = Runtime::Current()->GetHeap();
   {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     {
       TimingLogger::ScopedTiming t("PruneNonImageClasses", timings);
       PruneNonImageClasses();  // Remove junk
     }
-    if (compile_app_image_) {
+
+    if (compiler_options_.IsAppImage()) {
       TimingLogger::ScopedTiming t("ClearDexFileCookies", timings);
       // Clear dex file cookies for app images to enable app image determinism. This is required
       // since the cookie field contains long pointers to DexFiles which are not deterministic.
       // b/34090128
       ClearDexFileCookies();
-    } else {
-      TimingLogger::ScopedTiming t("ComputeLazyFieldsForImageClasses", timings);
-      // Avoid for app image since this may increase RAM and image size.
-      ComputeLazyFieldsForImageClasses();  // Add useful information
     }
   }
+
   {
     TimingLogger::ScopedTiming t("CollectGarbage", timings);
     heap->CollectGarbage(/* clear_soft_references */ false);  // Remove garbage.
   }
 
   if (kIsDebugBuild) {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     CheckNonImageClassesRemoved();
   }
 
+  // Used to store information that will later be used to calculate image
+  // offsets to string references in the AppImage.
+  std::vector<HeapReferencePointerInfo> string_ref_info;
+  if (ClassLinker::kAppImageMayContainStrings && compiler_options_.IsAppImage()) {
+    // Count the number of string fields so we can allocate the appropriate
+    // amount of space in the image section.
+    TimingLogger::ScopedTiming t("AppImage:CollectStringReferenceInfo", timings);
+    ScopedObjectAccess soa(self);
+
+    if (kIsDebugBuild) {
+      VerifyNativeGCRootInvariants();
+      CHECK_EQ(image_infos_.size(), 1u);
+    }
+
+    string_ref_info = CollectStringReferenceInfo();
+    image_infos_.back().num_string_references_ = string_ref_info.size();
+  }
+
   {
     TimingLogger::ScopedTiming t("CalculateNewObjectOffsets", timings);
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     CalculateNewObjectOffsets();
   }
 
+  // Obtain class count for debugging purposes
+  if (VLOG_IS_ON(compiler) && compiler_options_.IsAppImage()) {
+    ScopedObjectAccess soa(self);
+
+    size_t app_image_class_count  = 0;
+
+    for (ImageInfo& info : image_infos_) {
+      info.class_table_->Visit([&](ObjPtr<mirror::Class> klass)
+                                   REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (!IsInBootImage(klass.Ptr())) {
+          ++app_image_class_count;
+        }
+
+        // Indicate that we would like to continue visiting classes.
+        return true;
+      });
+    }
+
+    VLOG(compiler) << "Dex2Oat:AppImage:classCount = " << app_image_class_count;
+  }
+
+  if (ClassLinker::kAppImageMayContainStrings && compiler_options_.IsAppImage()) {
+    // Use the string reference information obtained earlier to calculate image
+    // offsets.  These will later be written to the image by Write/CopyMetadata.
+    TimingLogger::ScopedTiming t("AppImage:CalculateImageOffsets", timings);
+    ScopedObjectAccess soa(self);
+
+    size_t managed_string_refs = 0;
+    size_t native_string_refs = 0;
+
+    /*
+     * Iterate over the string reference info and calculate image offsets.
+     * The first element of the pair is either the object the reference belongs
+     * to or the beginning of the native reference array it is located in.  In
+     * the first case the second element is the offset of the field relative to
+     * the object's base address.  In the second case, it is the index of the
+     * StringDexCacheType object in the array.
+     */
+    for (const HeapReferencePointerInfo& ref_info : string_ref_info) {
+      uint32_t base_offset;
+
+      if (HasDexCacheStringNativeRefTag(ref_info.first)) {
+        ++native_string_refs;
+        auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
+            ref_info.first));
+        base_offset = SetDexCacheStringNativeRefTag(GetImageOffset(obj_ptr));
+      } else if (HasDexCachePreResolvedStringNativeRefTag(ref_info.first)) {
+        ++native_string_refs;
+        auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
+            ref_info.first));
+        base_offset = SetDexCachePreResolvedStringNativeRefTag(GetImageOffset(obj_ptr));
+      } else {
+        ++managed_string_refs;
+        base_offset = GetImageOffset(reinterpret_cast<mirror::Object*>(ref_info.first));
+      }
+
+      string_reference_offsets_.emplace_back(base_offset, ref_info.second);
+    }
+
+    CHECK_EQ(image_infos_.back().num_string_references_,
+             string_reference_offsets_.size());
+
+    VLOG(compiler) << "Dex2Oat:AppImage:stringReferences = " << string_reference_offsets_.size();
+    VLOG(compiler) << "Dex2Oat:AppImage:managedStringReferences = " << managed_string_refs;
+    VLOG(compiler) << "Dex2Oat:AppImage:nativeStringReferences = " << native_string_refs;
+  }
+
   // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
   // bin size sums being calculated.
   TimingLogger::ScopedTiming t("AllocMemory", timings);
-  if (!AllocMemory()) {
-    return false;
-  }
-
-  return true;
+  return AllocMemory();
 }
 
+class ImageWriter::CollectStringReferenceVisitor {
+ public:
+  explicit CollectStringReferenceVisitor(const ImageWriter& image_writer)
+      : image_writer_(image_writer),
+        curr_obj_(nullptr),
+        string_ref_info_(0),
+        dex_cache_string_ref_counter_(0) {}
+
+  // Used to prevent repeated null checks in the code that calls the visitor.
+  ALWAYS_INLINE
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  /*
+   * Counts the number of native references to strings reachable through
+   * DexCache objects for verification later.
+   */
+  ALWAYS_INLINE
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_)  {
+    ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
+
+    if (curr_obj_->IsDexCache() &&
+        image_writer_.IsValidAppImageStringReference(referred_obj)) {
+      ++dex_cache_string_ref_counter_;
+    }
+  }
+
+  // Collects info for managed fields that reference managed Strings.
+  ALWAYS_INLINE
+  void operator() (ObjPtr<mirror::Object> obj,
+                   MemberOffset member_offset,
+                   bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> referred_obj =
+        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+            member_offset);
+
+    if (image_writer_.IsValidAppImageStringReference(referred_obj)) {
+      string_ref_info_.emplace_back(reinterpret_cast<uintptr_t>(obj.Ptr()),
+                                    member_offset.Uint32Value());
+    }
+  }
+
+  ALWAYS_INLINE
+  void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+                   ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
+  }
+
+  void AddStringRefInfo(uint32_t first, uint32_t second) {
+    string_ref_info_.emplace_back(first, second);
+  }
+
+  std::vector<HeapReferencePointerInfo>&& MoveRefInfo() {
+    return std::move(string_ref_info_);
+  }
+
+  // Used by the wrapper function to obtain a native reference count.
+  size_t GetDexCacheStringRefCount() const {
+    return dex_cache_string_ref_counter_;
+  }
+
+  void SetObject(ObjPtr<mirror::Object> obj) {
+    curr_obj_ = obj;
+    dex_cache_string_ref_counter_ = 0;
+  }
+
+ private:
+  const ImageWriter& image_writer_;
+  ObjPtr<mirror::Object> curr_obj_;
+  mutable std::vector<HeapReferencePointerInfo> string_ref_info_;
+  mutable size_t dex_cache_string_ref_counter_;
+};
+
+std::vector<ImageWriter::HeapReferencePointerInfo> ImageWriter::CollectStringReferenceInfo() const
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  CollectStringReferenceVisitor visitor(*this);
+
+  /*
+   * References to managed strings can occur either in the managed heap or in
+   * native memory regions.  Information about managed references is collected
+   * by the CollectStringReferenceVisitor and directly added to the internal
+   * info vector.
+   *
+   * Native references to managed strings can only occur through DexCache
+   * objects.  This is verified by VerifyNativeGCRootInvariants().  Due to the
+   * fact that these native references are encapsulated in std::atomic objects
+   * the VisitReferences() function can't pass the visiting object the address
+   * of the reference.  Instead, the VisitReferences() function loads the
+   * reference into a temporary variable and passes that address to the
+   * visitor.  As a consequence of this we can't uniquely identify the location
+   * of the string reference in the visitor.
+   *
+   * Due to these limitations, the visitor will only count the number of
+   * managed strings reachable through the native references of a DexCache
+   * object.  If there are any such strings, this function will then iterate
+   * over the native references, test the string for membership in the
+   * AppImage, and add the tagged DexCache pointer and string array offset to
+   * the info vector if necessary.
+   */
+  heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (IsImageObject(object)) {
+      visitor.SetObject(object);
+
+      if (object->IsDexCache()) {
+        object->VisitReferences</* kVisitNativeRoots= */ true,
+                                kVerifyNone,
+                                kWithoutReadBarrier>(visitor, visitor);
+
+        if (visitor.GetDexCacheStringRefCount() > 0) {
+          size_t string_info_collected = 0;
+
+          ObjPtr<mirror::DexCache> dex_cache = object->AsDexCache();
+          DCHECK_LE(visitor.GetDexCacheStringRefCount(), dex_cache->NumStrings());
+
+          for (uint32_t index = 0; index < dex_cache->NumStrings(); ++index) {
+            // GetResolvedString() can't be used here due to the circular
+            // nature of the cache and the collision detection this requires.
+            ObjPtr<mirror::String> referred_string =
+                dex_cache->GetStrings()[index].load().object.Read();
+
+            if (IsValidAppImageStringReference(referred_string)) {
+              ++string_info_collected;
+              visitor.AddStringRefInfo(
+                  SetDexCacheStringNativeRefTag(reinterpret_cast<uintptr_t>(object.Ptr())), index);
+            }
+          }
+
+          // Visit all of the preinitialized strings.
+          GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+          for (size_t index = 0; index < dex_cache->NumPreResolvedStrings(); ++index) {
+            ObjPtr<mirror::String> referred_string = preresolved_strings[index].Read();
+            if (IsValidAppImageStringReference(referred_string)) {
+              ++string_info_collected;
+              visitor.AddStringRefInfo(SetDexCachePreResolvedStringNativeRefTag(
+                reinterpret_cast<uintptr_t>(object.Ptr())),
+                index);
+            }
+          }
+
+          DCHECK_EQ(string_info_collected, visitor.GetDexCacheStringRefCount());
+        }
+      } else {
+        object->VisitReferences</* kVisitNativeRoots= */ false,
+                                kVerifyNone,
+                                kWithoutReadBarrier>(visitor, visitor);
+      }
+    }
+  });
+
+  return visitor.MoveRefInfo();
+}
+
+class ImageWriter::NativeGCRootInvariantVisitor {
+ public:
+  explicit NativeGCRootInvariantVisitor(const ImageWriter& image_writer) :
+    curr_obj_(nullptr), class_violation_(false), class_loader_violation_(false),
+    image_writer_(image_writer) {}
+
+  ALWAYS_INLINE
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  ALWAYS_INLINE
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_)  {
+    ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
+
+    if (curr_obj_->IsClass()) {
+      class_violation_ = class_violation_ ||
+                         image_writer_.IsValidAppImageStringReference(referred_obj);
+
+    } else if (curr_obj_->IsClassLoader()) {
+      class_loader_violation_ = class_loader_violation_ ||
+                                image_writer_.IsValidAppImageStringReference(referred_obj);
+
+    } else if (!curr_obj_->IsDexCache()) {
+      LOG(FATAL) << "Dex2Oat:AppImage | " <<
+                    "Native reference to String found in unexpected object type.";
+    }
+  }
+
+  ALWAYS_INLINE
+  void operator() (ObjPtr<mirror::Object> obj ATTRIBUTE_UNUSED,
+                   MemberOffset member_offset ATTRIBUTE_UNUSED,
+                   bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {}
+
+  ALWAYS_INLINE
+  void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+                   ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {}
+
+  // Returns true iff the only reachable native string references are through DexCache objects.
+  bool InvariantsHold() const {
+    return !(class_violation_ || class_loader_violation_);
+  }
+
+  ObjPtr<mirror::Object> curr_obj_;
+  mutable bool class_violation_;
+  mutable bool class_loader_violation_;
+
+ private:
+  const ImageWriter& image_writer_;
+};
+
+void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) {
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+
+  NativeGCRootInvariantVisitor visitor(*this);
+
+  heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    visitor.curr_obj_ = object;
+
+    if (!IsInBootImage(object.Ptr())) {
+      object->VisitReferences</* kVisitNativeReferences= */ true,
+                              kVerifyNone,
+                              kWithoutReadBarrier>(visitor, visitor);
+    }
+  });
+
+  bool error = false;
+  std::ostringstream error_str;
+
+  /*
+   * Build the error string
+   */
+
+  if (UNLIKELY(visitor.class_violation_)) {
+    error_str << "Class";
+    error = true;
+  }
+
+  if (UNLIKELY(visitor.class_loader_violation_)) {
+    if (error) {
+      error_str << ", ";
+    }
+
+    error_str << "ClassLoader";
+  }
+
+  CHECK(visitor.InvariantsHold()) <<
+    "Native GC root invariant failure. String ref invariants don't hold for the following " <<
+    "object types: " << error_str.str();
+}
+
+void ImageWriter::CopyMetadata() {
+  DCHECK(compiler_options_.IsAppImage());
+  CHECK_EQ(image_infos_.size(), 1u);
+
+  const ImageInfo& image_info = image_infos_.back();
+  std::vector<ImageSection> image_sections = image_info.CreateImageSections().second;
+
+  auto* sfo_section_base = reinterpret_cast<AppImageReferenceOffsetInfo*>(
+      image_info.image_.Begin() +
+      image_sections[ImageHeader::kSectionStringReferenceOffsets].Offset());
+
+  std::copy(string_reference_offsets_.begin(),
+            string_reference_offsets_.end(),
+            sfo_section_base);
+}
+
+bool ImageWriter::IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const {
+  return referred_obj != nullptr &&
+         !IsInBootImage(referred_obj.Ptr()) &&
+         referred_obj->IsString();
+}
+
+// Helper class that erases the image file if it isn't properly flushed and closed.
+class ImageWriter::ImageFileGuard {
+ public:
+  ImageFileGuard() noexcept = default;
+  ImageFileGuard(ImageFileGuard&& other) noexcept = default;
+  ImageFileGuard& operator=(ImageFileGuard&& other) noexcept = default;
+
+  ~ImageFileGuard() {
+    if (image_file_ != nullptr) {
+      // Failure, erase the image file.
+      image_file_->Erase();
+    }
+  }
+
+  void reset(File* image_file) {
+    image_file_.reset(image_file);
+  }
+
+  bool operator==(std::nullptr_t) {
+    return image_file_ == nullptr;
+  }
+
+  bool operator!=(std::nullptr_t) {
+    return image_file_ != nullptr;
+  }
+
+  File* operator->() const {
+    return image_file_.get();
+  }
+
+  bool WriteHeaderAndClose(const std::string& image_filename, const ImageHeader* image_header) {
+    // The header is uncompressed since it contains whether the image is compressed or not.
+    if (!image_file_->PwriteFully(image_header, sizeof(ImageHeader), 0)) {
+      PLOG(ERROR) << "Failed to write image file header " << image_filename;
+      return false;
+    }
+
+    // FlushCloseOrErase() takes care of erasing, so the destructor does not need
+    // to do that whether the FlushCloseOrErase() succeeds or fails.
+    std::unique_ptr<File> image_file = std::move(image_file_);
+    if (image_file->FlushCloseOrErase() != 0) {
+      PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
+      return false;
+    }
+
+    return true;
+  }
+
+ private:
+  std::unique_ptr<File> image_file_;
+};
+
 bool ImageWriter::Write(int image_fd,
-                        const std::vector<const char*>& image_filenames,
-                        const std::vector<const char*>& oat_filenames) {
+                        const std::vector<std::string>& image_filenames,
+                        const std::vector<std::string>& oat_filenames) {
   // If image_fd or oat_fd are not kInvalidFd then we may have empty strings in image_filenames or
   // oat_filenames.
   CHECK(!image_filenames.empty());
@@ -257,8 +695,22 @@
   CHECK(!oat_filenames.empty());
   CHECK_EQ(image_filenames.size(), oat_filenames.size());
 
+  Thread* self = Thread::Current();
   {
-    ScopedObjectAccess soa(Thread::Current());
+    // Preload deterministic contents to the dex cache arrays we're going to write.
+    ScopedObjectAccess soa(self);
+    ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
+    std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
+    for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
+      if (!IsImageObject(dex_cache)) {
+        continue;  // Boot image DexCache is not written to the app image.
+      }
+      PreloadDexCache(dex_cache, class_loader);
+    }
+  }
+
+  {
+    ScopedObjectAccess soa(self);
     for (size_t i = 0; i < oat_filenames.size(); ++i) {
       CreateHeader(i);
       CopyAndFixupNativeData(i);
@@ -267,17 +719,29 @@
 
   {
     // TODO: heap validation can't handle these fix up passes.
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     Runtime::Current()->GetHeap()->DisableObjectValidation();
     CopyAndFixupObjects();
   }
 
+  if (compiler_options_.IsAppImage()) {
+    CopyMetadata();
+  }
+
+  // Primary image header shall be written last for two reasons. First, this ensures
+  // that we shall not end up with a valid primary image and invalid secondary image.
+  // Second, its checksum shall include the checksums of the secondary images (XORed).
+  // This way only the primary image checksum needs to be checked to determine whether
+  // any of the images or oat files are out of date. (Oat file checksums are included
+  // in the image checksum calculation.)
+  ImageHeader* primary_header = reinterpret_cast<ImageHeader*>(image_infos_[0].image_.Begin());
+  ImageFileGuard primary_image_file;
   for (size_t i = 0; i < image_filenames.size(); ++i) {
-    const char* image_filename = image_filenames[i];
+    const std::string& image_filename = image_filenames[i];
     ImageInfo& image_info = GetImageInfo(i);
-    std::unique_ptr<File> image_file;
+    ImageFileGuard image_file;
     if (image_fd != kInvalidFd) {
-      if (strlen(image_filename) == 0u) {
+      if (image_filename.empty()) {
         image_file.reset(new File(image_fd, unix_file::kCheckSafeUsage));
         // Empty the file in case it already exists.
         if (image_file != nullptr) {
@@ -288,7 +752,7 @@
         LOG(ERROR) << "image fd " << image_fd << " name " << image_filename;
       }
     } else {
-      image_file.reset(OS::CreateEmptyFile(image_filename));
+      image_file.reset(OS::CreateEmptyFile(image_filename.c_str()));
     }
 
     if (image_file == nullptr) {
@@ -296,87 +760,149 @@
       return false;
     }
 
-    if (!compile_app_image_ && fchmod(image_file->Fd(), 0644) != 0) {
+    if (!compiler_options_.IsAppImage() && fchmod(image_file->Fd(), 0644) != 0) {
       PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
-      image_file->Erase();
       return EXIT_FAILURE;
     }
 
     // Image data size excludes the bitmap and the header.
     ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
-    ArrayRef<const uint8_t> raw_image_data(image_info.image_.Begin() + sizeof(ImageHeader),
-                                           image_header->GetImageSize() - sizeof(ImageHeader));
 
-    CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
-    std::vector<uint8_t> compressed_data;
-    ArrayRef<const uint8_t> image_data =
-        MaybeCompressData(raw_image_data, image_storage_mode_, &compressed_data);
+    // Block sources (from the image).
+    const bool is_compressed = image_storage_mode_ != ImageHeader::kStorageModeUncompressed;
+    std::vector<std::pair<uint32_t, uint32_t>> block_sources;
+    std::vector<ImageHeader::Block> blocks;
 
-    // Write out the image + fields + methods.
-    if (!image_file->PwriteFully(image_data.data(), image_data.size(), sizeof(ImageHeader))) {
-      PLOG(ERROR) << "Failed to write image file data " << image_filename;
-      image_file->Erase();
-      return false;
+    // Add a set of solid blocks such that no block is larger than the maximum size. A solid block
+    // is a block that must be decompressed all at once.
+    auto add_blocks = [&](uint32_t offset, uint32_t size) {
+      while (size != 0u) {
+        const uint32_t cur_size = std::min(size, compiler_options_.MaxImageBlockSize());
+        block_sources.emplace_back(offset, cur_size);
+        offset += cur_size;
+        size -= cur_size;
+      }
+    };
+
+    add_blocks(sizeof(ImageHeader), image_header->GetImageSize() - sizeof(ImageHeader));
+
+    // Checksum of compressed image data and header.
+    uint32_t image_checksum = adler32(0L, Z_NULL, 0);
+    image_checksum = adler32(image_checksum,
+                             reinterpret_cast<const uint8_t*>(image_header),
+                             sizeof(ImageHeader));
+    // Copy and compress blocks.
+    size_t out_offset = sizeof(ImageHeader);
+    for (const std::pair<uint32_t, uint32_t> block : block_sources) {
+      ArrayRef<const uint8_t> raw_image_data(image_info.image_.Begin() + block.first,
+                                             block.second);
+      std::vector<uint8_t> compressed_data;
+      ArrayRef<const uint8_t> image_data =
+          MaybeCompressData(raw_image_data, image_storage_mode_, &compressed_data);
+
+      if (!is_compressed) {
+        // For uncompressed, preserve alignment since the image will be directly mapped.
+        out_offset = block.first;
+      }
+
+      // Fill in the compressed location of the block.
+      blocks.emplace_back(ImageHeader::Block(
+          image_storage_mode_,
+          /*data_offset=*/ out_offset,
+          /*data_size=*/ image_data.size(),
+          /*image_offset=*/ block.first,
+          /*image_size=*/ block.second));
+
+      // Write out the image + fields + methods.
+      if (!image_file->PwriteFully(image_data.data(), image_data.size(), out_offset)) {
+        PLOG(ERROR) << "Failed to write image file data " << image_filename;
+        image_file->Erase();
+        return false;
+      }
+      out_offset += image_data.size();
+      image_checksum = adler32(image_checksum, image_data.data(), image_data.size());
     }
 
-    // Write out the image bitmap at the page aligned start of the image end, also uncompressed for
-    // convenience.
-    const ImageSection& bitmap_section = image_header->GetImageBitmapSection();
+    // Write the block metadata directly after the image sections.
+    // Note: This is not part of the mapped image and is not preserved after decompressing, it's
+    // only used for image loading. For this reason, only write it out for compressed images.
+    if (is_compressed) {
+      // Align up since the compressed data is not necessarily aligned.
+      out_offset = RoundUp(out_offset, alignof(ImageHeader::Block));
+      CHECK(!blocks.empty());
+      const size_t blocks_bytes = blocks.size() * sizeof(blocks[0]);
+      if (!image_file->PwriteFully(&blocks[0], blocks_bytes, out_offset)) {
+        PLOG(ERROR) << "Failed to write image blocks " << image_filename;
+        image_file->Erase();
+        return false;
+      }
+      image_header->blocks_offset_ = out_offset;
+      image_header->blocks_count_ = blocks.size();
+      out_offset += blocks_bytes;
+    }
+
+    // Data size includes everything except the bitmap.
+    image_header->data_size_ = out_offset - sizeof(ImageHeader);
+
+    // Update and write the bitmap section. Note that the bitmap section is relative to the
+    // possibly compressed image.
+    ImageSection& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
     // Align up since data size may be unaligned if the image is compressed.
-    size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + image_data.size(), kPageSize);
-    if (image_storage_mode_ == ImageHeader::kDefaultStorageMode) {
-      CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
-    }
+    out_offset = RoundUp(out_offset, kPageSize);
+    bitmap_section = ImageSection(out_offset, bitmap_section.Size());
+
     if (!image_file->PwriteFully(image_info.image_bitmap_->Begin(),
                                  bitmap_section.Size(),
-                                 bitmap_position_in_file)) {
+                                 bitmap_section.Offset())) {
       PLOG(ERROR) << "Failed to write image file bitmap " << image_filename;
-      image_file->Erase();
-      return false;
-    }
-
-    // Write out relocations.
-    size_t relocations_position_in_file = bitmap_position_in_file + bitmap_section.Size();
-    ArrayRef<const uint8_t> relocations = MaybeCompressData(
-        ArrayRef<const uint8_t>(image_info.relocation_bitmap_),
-        image_storage_mode_,
-        &compressed_data);
-    image_header->sections_[ImageHeader::kSectionImageRelocations] =
-        ImageSection(bitmap_section.Offset() + bitmap_section.Size(), relocations.size());
-    if (!image_file->PwriteFully(relocations.data(),
-                                 relocations.size(),
-                                 relocations_position_in_file)) {
-      PLOG(ERROR) << "Failed to write image file relocations " << image_filename;
-      image_file->Erase();
       return false;
     }
 
     int err = image_file->Flush();
     if (err < 0) {
       PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err;
-      image_file->Erase();
       return false;
     }
 
+    // Calculate the image checksum of the remaining data.
+    image_checksum = adler32(image_checksum,
+                             reinterpret_cast<const uint8_t*>(image_info.image_bitmap_->Begin()),
+                             bitmap_section.Size());
+    image_header->SetImageChecksum(image_checksum);
+
+    if (VLOG_IS_ON(compiler)) {
+      const size_t separately_written_section_size = bitmap_section.Size();
+      const size_t total_uncompressed_size = image_info.image_size_ +
+          separately_written_section_size;
+      const size_t total_compressed_size = out_offset + separately_written_section_size;
+
+      VLOG(compiler) << "Dex2Oat:uncompressedImageSize = " << total_uncompressed_size;
+      if (total_uncompressed_size != total_compressed_size) {
+        VLOG(compiler) << "Dex2Oat:compressedImageSize = " << total_compressed_size;
+      }
+    }
+
+    CHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()))
+        << "Bitmap should be at the end of the file";
+
     // Write header last in case the compiler gets killed in the middle of image writing.
     // We do not want to have a corrupted image with a valid header.
-    // The header is uncompressed since it contains whether the image is compressed or not.
-    image_header->data_size_ = image_data.size();
-    if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_.Begin()),
-                                 sizeof(ImageHeader),
-                                 0)) {
-      PLOG(ERROR) << "Failed to write image file header " << image_filename;
-      image_file->Erase();
-      return false;
-    }
-
-    CHECK_EQ(relocations_position_in_file + relocations.size(),
-             static_cast<size_t>(image_file->GetLength()));
-    if (image_file->FlushCloseOrErase() != 0) {
-      PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
-      return false;
+    // Delay the writing of the primary image header until after writing secondary images.
+    if (i == 0u) {
+      primary_image_file = std::move(image_file);
+    } else {
+      if (!image_file.WriteHeaderAndClose(image_filename, image_header)) {
+        return false;
+      }
+      // Update the primary image checksum with the secondary image checksum.
+      primary_header->SetImageChecksum(primary_header->GetImageChecksum() ^ image_checksum);
     }
   }
+  DCHECK(primary_image_file != nullptr);
+  if (!primary_image_file.WriteHeaderAndClose(image_filenames[0], primary_header)) {
+    return false;
+  }
+
   return true;
 }
 
@@ -447,7 +973,7 @@
         oss << ". Lock owner:" << lw.ThinLockOwner();
       }
       LOG(FATAL) << oss.str();
-      break;
+      UNREACHABLE();
     }
     case LockWord::kUnlocked:
       // No hash, don't need to save it.
@@ -485,7 +1011,7 @@
   for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
     ObjPtr<mirror::DexCache> dex_cache =
         ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-    if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
+    if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
@@ -512,15 +1038,27 @@
     DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
     AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), oat_index);
 
-    if (dex_cache->GetResolvedMethodTypes() != nullptr) {
-      AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
-                                 start + layout.MethodTypesOffset(),
-                                 oat_index);
-    }
-    if (dex_cache->GetResolvedCallSites() != nullptr) {
-      AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
-                                 start + layout.CallSitesOffset(),
-                                 oat_index);
+    AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
+                               start + layout.MethodTypesOffset(),
+                               oat_index);
+    AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
+                                start + layout.CallSitesOffset(),
+                                oat_index);
+
+    // Preresolved strings aren't part of the special layout.
+    GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+    if (preresolved_strings != nullptr) {
+      DCHECK(!IsInBootImage(preresolved_strings));
+      // Add the array to the metadata section.
+      const size_t count = dex_cache->NumPreResolvedStrings();
+      auto bin = BinTypeForNativeRelocationType(NativeObjectRelocationType::kGcRootPointer);
+      for (size_t i = 0; i < count; ++i) {
+        native_object_relocations_.emplace(&preresolved_strings[i],
+            NativeObjectRelocation { oat_index,
+                                     image_info.GetBinSlotSize(bin),
+                                     NativeObjectRelocationType::kGcRootPointer });
+        image_info.IncrementBinSlotSize(bin, sizeof(GcRoot<mirror::Object>));
+      }
     }
   }
 }
@@ -725,16 +1263,13 @@
 
 bool ImageWriter::AllocMemory() {
   for (ImageInfo& image_info : image_infos_) {
-    ImageSection unused_sections[ImageHeader::kSectionCount];
-    const size_t length = RoundUp(
-        image_info.CreateImageSections(unused_sections), kPageSize);
+    const size_t length = RoundUp(image_info.CreateImageSections().first, kPageSize);
 
     std::string error_msg;
     image_info.image_ = MemMap::MapAnonymous("image writer image",
-                                             /* addr */ nullptr,
                                              length,
                                              PROT_READ | PROT_WRITE,
-                                             /* low_4gb */ false,
+                                             /*low_4gb=*/ false,
                                              &error_msg);
     if (UNLIKELY(!image_info.image_.IsValid())) {
       LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
@@ -753,21 +1288,6 @@
   return true;
 }
 
-class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
- public:
-  bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    StackHandleScope<1> hs(Thread::Current());
-    mirror::Class::ComputeName(hs.NewHandle(c));
-    return true;
-  }
-};
-
-void ImageWriter::ComputeLazyFieldsForImageClasses() {
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ComputeLazyFieldsForClassesVisitor visitor;
-  class_linker->VisitClassesWithoutClassesLock(&visitor);
-}
-
 static bool IsBootClassLoaderClass(ObjPtr<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   return klass->GetClassLoader() == nullptr;
@@ -858,7 +1378,7 @@
     std::unordered_set<mirror::Object*>* visited) {
   DCHECK(early_exit != nullptr);
   DCHECK(visited != nullptr);
-  DCHECK(compile_app_image_);
+  DCHECK(compiler_options_.IsAppImage());
   if (klass == nullptr || IsInBootImage(klass.Ptr())) {
     return false;
   }
@@ -962,7 +1482,8 @@
   if (klass == nullptr) {
     return false;
   }
-  if (compile_app_image_ && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+  if (!compiler_options_.IsBootImage() &&
+      Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
     // Already in boot image, return true.
     return true;
   }
@@ -970,7 +1491,7 @@
   if (!compiler_options_.IsImageClass(klass->GetDescriptor(&temp))) {
     return false;
   }
-  if (compile_app_image_) {
+  if (compiler_options_.IsAppImage()) {
     // For app images, we need to prune boot loader classes that are not in the boot image since
     // these may have already been loaded when the app image is loaded.
     // Keep classes in the boot image space since we don't want to re-resolve these.
@@ -1029,27 +1550,15 @@
         Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
     class_table->Visit(classes_visitor);
     removed_class_count_ += classes_visitor.Prune();
-
-    // Record app image class loader. The fake boot class loader should not get registered
-    // and we should end up with only one class loader for an app and none for boot image.
-    if (class_loader != nullptr && class_table != nullptr) {
-      DCHECK(class_loader_ == nullptr);
-      class_loader_ = class_loader;
-    }
   }
 
   size_t GetRemovedClassCount() const {
     return removed_class_count_;
   }
 
-  ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return class_loader_;
-  }
-
  private:
   ImageWriter* const image_writer_;
   size_t removed_class_count_;
-  ObjPtr<mirror::ClassLoader> class_loader_;
 };
 
 void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
@@ -1058,8 +1567,79 @@
   Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
 }
 
-void ImageWriter::PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
-                                          ObjPtr<mirror::ClassLoader> class_loader) {
+// Remove DexCache entries that reference classes which are not kept in the image
+// (see KeepClass()), so that the written image contains no references to pruned
+// classes. Methods and fields are cleared based on the class referenced by their
+// MethodId/FieldId; resolved types are cleared directly. Strings reference no
+// classes and therefore need no pruning.
+void ImageWriter::PruneDexCache(ObjPtr<mirror::DexCache> dex_cache,
+                                ObjPtr<mirror::ClassLoader> class_loader) {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+  // Prune methods.
+  dex::TypeIndex last_class_idx;  // Initialized to invalid index.
+  ObjPtr<mirror::Class> last_class = nullptr;
+  mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
+  for (size_t slot_idx = 0, num = dex_cache->NumResolvedMethods(); slot_idx != num; ++slot_idx) {
+    auto pair =
+        mirror::DexCache::GetNativePairPtrSize(resolved_methods, slot_idx, target_ptr_size_);
+    uint32_t stored_index = pair.index;
+    ArtMethod* method = pair.object;
+    if (method == nullptr) {
+      continue;  // Empty entry.
+    }
+    // Check if the referenced class is in the image. Note that we want to check the referenced
+    // class rather than the declaring class to preserve the semantics, i.e. using a MethodId
+    // results in resolving the referenced class and that can for example throw OOME.
+    const dex::MethodId& method_id = dex_file.GetMethodId(stored_index);
+    if (method_id.class_idx_ != last_class_idx) {
+      // Cache the lookup result; consecutive entries often reference the same class.
+      last_class_idx = method_id.class_idx_;
+      last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
+      if (last_class != nullptr && !KeepClass(last_class)) {
+        last_class = nullptr;  // Treat a pruned class the same as an unresolved one.
+      }
+    }
+    if (last_class == nullptr) {
+      dex_cache->ClearResolvedMethod(stored_index, target_ptr_size_);
+    }
+  }
+  // Prune fields. Same caching scheme as for methods above.
+  mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
+  last_class_idx = dex::TypeIndex();  // Initialized to invalid index.
+  last_class = nullptr;
+  for (size_t slot_idx = 0, num = dex_cache->NumResolvedFields(); slot_idx != num; ++slot_idx) {
+    auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
+    uint32_t stored_index = pair.index;
+    ArtField* field = pair.object;
+    if (field == nullptr) {
+      continue;  // Empty entry.
+    }
+    // Check if the referenced class is in the image. Note that we want to check the referenced
+    // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
+    // results in resolving the referenced class and that can for example throw OOME.
+    const dex::FieldId& field_id = dex_file.GetFieldId(stored_index);
+    if (field_id.class_idx_ != last_class_idx) {
+      last_class_idx = field_id.class_idx_;
+      last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
+      if (last_class != nullptr && !KeepClass(last_class)) {
+        last_class = nullptr;
+      }
+    }
+    if (last_class == nullptr) {
+      dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
+    }
+  }
+  // Prune types: clear any resolved type whose class is not kept.
+  for (size_t slot_idx = 0, num = dex_cache->NumResolvedTypes(); slot_idx != num; ++slot_idx) {
+    mirror::TypeDexCachePair pair =
+        dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
+    uint32_t stored_index = pair.index;
+    ObjPtr<mirror::Class> klass = pair.object.Read();
+    if (klass != nullptr && !KeepClass(klass)) {
+      dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
+    }
+  }
+  // Strings do not need pruning.
+}
+
+void ImageWriter::PreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
+                                  ObjPtr<mirror::ClassLoader> class_loader) {
   // To ensure deterministic contents of the hash-based arrays, each slot shall contain
   // the candidate with the lowest index. As we're processing entries in increasing index
   // order, this means trying to look up the entry for the current index if the slot is
@@ -1068,7 +1648,7 @@
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  // Prune methods.
+  // Preload the methods array and make the contents deterministic.
   mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
   dex::TypeIndex last_class_idx;  // Initialized to invalid index.
   ObjPtr<mirror::Class> last_class = nullptr;
@@ -1084,31 +1664,24 @@
     // Check if the referenced class is in the image. Note that we want to check the referenced
     // class rather than the declaring class to preserve the semantics, i.e. using a MethodId
     // results in resolving the referenced class and that can for example throw OOME.
-    const DexFile::MethodId& method_id = dex_file.GetMethodId(i);
+    const dex::MethodId& method_id = dex_file.GetMethodId(i);
     if (method_id.class_idx_ != last_class_idx) {
       last_class_idx = method_id.class_idx_;
       last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
-      if (last_class != nullptr && !KeepClass(last_class)) {
-        last_class = nullptr;
-      }
     }
     if (method == nullptr || i < stored_index) {
       if (last_class != nullptr) {
         // Try to resolve the method with the class linker, which will insert
         // it into the dex cache if successful.
         method = class_linker->FindResolvedMethod(last_class, dex_cache, class_loader, i);
-        // If the referenced class is in the image, the defining class must also be there.
-        DCHECK(method == nullptr || KeepClass(method->GetDeclaringClass()));
         DCHECK(method == nullptr || dex_cache->GetResolvedMethod(i, target_ptr_size_) == method);
       }
     } else {
       DCHECK_EQ(i, stored_index);
-      if (last_class == nullptr) {
-        dex_cache->ClearResolvedMethod(stored_index, target_ptr_size_);
-      }
+      DCHECK(last_class != nullptr);
     }
   }
-  // Prune fields and make the contents of the field array deterministic.
+  // Preload the fields array and make the contents deterministic.
   mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
   last_class_idx = dex::TypeIndex();  // Initialized to invalid index.
   last_class = nullptr;
@@ -1123,7 +1696,7 @@
     // Check if the referenced class is in the image. Note that we want to check the referenced
     // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
     // results in resolving the referenced class and that can for example throw OOME.
-    const DexFile::FieldId& field_id = dex_file.GetFieldId(i);
+    const dex::FieldId& field_id = dex_file.GetFieldId(i);
     if (field_id.class_idx_ != last_class_idx) {
       last_class_idx = field_id.class_idx_;
       last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
@@ -1133,19 +1706,17 @@
     }
     if (field == nullptr || i < stored_index) {
       if (last_class != nullptr) {
+        // Try to resolve the field with the class linker, which will insert
+        // it into the dex cache if successful.
         field = class_linker->FindResolvedFieldJLS(last_class, dex_cache, class_loader, i);
-        // If the referenced class is in the image, the defining class must also be there.
-        DCHECK(field == nullptr || KeepClass(field->GetDeclaringClass()));
         DCHECK(field == nullptr || dex_cache->GetResolvedField(i, target_ptr_size_) == field);
       }
     } else {
       DCHECK_EQ(i, stored_index);
-      if (last_class == nullptr) {
-        dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
-      }
+      DCHECK(last_class != nullptr);
     }
   }
-  // Prune types and make the contents of the type array deterministic.
+  // Preload the types array and make the contents deterministic.
   // This is done after fields and methods as their lookup can touch the types array.
   for (size_t i = 0, end = dex_cache->GetDexFile()->NumTypeIds(); i < end; ++i) {
     dex::TypeIndex type_idx(i);
@@ -1156,15 +1727,10 @@
     ObjPtr<mirror::Class> klass = pair.object.Read();
     if (klass == nullptr || i < stored_index) {
       klass = class_linker->LookupResolvedType(type_idx, dex_cache, class_loader);
-      if (klass != nullptr) {
-        DCHECK_EQ(dex_cache->GetResolvedType(type_idx), klass);
-        stored_index = i;  // For correct clearing below if not keeping the `klass`.
-      }
-    } else if (i == stored_index && !KeepClass(klass)) {
-      dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
+      DCHECK(klass == nullptr || dex_cache->GetResolvedType(type_idx) == klass);
     }
   }
-  // Strings do not need pruning, but the contents of the string array must be deterministic.
+  // Preload the strings array and make the contents deterministic.
   for (size_t i = 0, end = dex_cache->GetDexFile()->NumStringIds(); i < end; ++i) {
     dex::StringIndex string_idx(i);
     uint32_t slot_idx = dex_cache->StringSlotIndex(string_idx);
@@ -1203,31 +1769,19 @@
   });
 
   // Remove the undesired classes from the class roots.
-  ObjPtr<mirror::ClassLoader> class_loader;
   {
     PruneClassLoaderClassesVisitor class_loader_visitor(this);
     VisitClassLoaders(&class_loader_visitor);
     VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
-    class_loader = class_loader_visitor.GetClassLoader();
-    DCHECK_EQ(class_loader != nullptr, compile_app_image_);
   }
 
   // Clear references to removed classes from the DexCaches.
-  std::vector<ObjPtr<mirror::DexCache>> dex_caches;
-  {
-    ReaderMutexLock mu2(self, *Locks::dex_lock_);
-    dex_caches.reserve(class_linker->GetDexCachesData().size());
-    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
-      if (self->IsJWeakCleared(data.weak_root)) {
-        continue;
-      }
-      dex_caches.push_back(self->DecodeJObject(data.weak_root)->AsDexCache());
-    }
-  }
+  std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
     // Pass the class loader associated with the DexCache. This can either be
     // the app's `class_loader` or `nullptr` if boot class loader.
-    PruneAndPreloadDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : class_loader);
+    bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
+    PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
   }
 
   // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1237,6 +1791,20 @@
   prune_class_memo_.clear();
 }
 
+// Collect all live (non-cleared) DexCache objects currently registered with the
+// ClassLinker. Returns them as a vector so callers can iterate without holding
+// the dex lock.
+std::vector<ObjPtr<mirror::DexCache>> ImageWriter::FindDexCaches(Thread* self) {
+  std::vector<ObjPtr<mirror::DexCache>> dex_caches;
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  // Hold the dex lock while walking the dex cache data to get a consistent snapshot.
+  ReaderMutexLock mu2(self, *Locks::dex_lock_);
+  dex_caches.reserve(class_linker->GetDexCachesData().size());
+  for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+    if (self->IsJWeakCleared(data.weak_root)) {
+      continue;  // Weak root was cleared by the GC; this dex cache is gone.
+    }
+    dex_caches.push_back(self->DecodeJObject(data.weak_root)->AsDexCache());
+  }
+  return dex_caches;
+}
+
 void ImageWriter::CheckNonImageClassesRemoved() {
   auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (obj->IsClass() && !IsInBootImage(obj)) {
@@ -1268,7 +1836,7 @@
       return found.Ptr();
     }
   }
-  if (compile_app_image_) {
+  if (!compiler_options_.IsBootImage()) {
     Runtime* const runtime = Runtime::Current();
     ObjPtr<mirror::String> found = runtime->GetInternTable()->LookupStrong(self, string);
     // If we found it in the runtime intern table it could either be in the boot image or interned
@@ -1311,7 +1879,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr())) {
+      if (IsImageObject(dex_cache)) {
         dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1330,7 +1898,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr())) {
+      if (IsImageObject(dex_cache)) {
         non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1344,7 +1912,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr()) &&
+      if (IsImageObject(dex_cache) &&
           image_dex_files.find(dex_file) != image_dex_files.end()) {
         dex_caches->Set<false>(i, dex_cache.Ptr());
         ++i;
@@ -1365,7 +1933,7 @@
   Handle<ObjectArray<Object>> dex_caches(hs.NewHandle(CollectDexCaches(self, oat_index)));
 
   // build an Object[] of the roots needed to restore the runtime
-  int32_t image_roots_size = ImageHeader::NumberOfImageRoots(compile_app_image_);
+  int32_t image_roots_size = ImageHeader::NumberOfImageRoots(compiler_options_.IsAppImage());
   Handle<ObjectArray<Object>> image_roots(hs.NewHandle(ObjectArray<Object>::Alloc(
       self, GetClassRoot<ObjectArray<Object>>(class_linker), image_roots_size)));
   image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
@@ -1378,14 +1946,14 @@
                           runtime->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
   image_roots->Set<false>(ImageHeader::kNoClassDefFoundError,
                           runtime->GetPreAllocatedNoClassDefFoundError());
-  if (!compile_app_image_) {
+  if (!compiler_options_.IsAppImage()) {
     DCHECK(boot_image_live_objects != nullptr);
     image_roots->Set<false>(ImageHeader::kBootImageLiveObjects, boot_image_live_objects.Get());
   } else {
     DCHECK(boot_image_live_objects == nullptr);
   }
-  for (int32_t i = 0, num = ImageHeader::NumberOfImageRoots(compile_app_image_); i != num; ++i) {
-    if (compile_app_image_ && i == ImageHeader::kAppImageClassLoader) {
+  for (int32_t i = 0; i != image_roots_size; ++i) {
+    if (compiler_options_.IsAppImage() && i == ImageHeader::kAppImageClassLoader) {
       // image_roots[ImageHeader::kAppImageClassLoader] will be set later for app image.
       continue;
     }
@@ -1397,7 +1965,7 @@
 mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
                                               mirror::Object* obj,
                                               size_t oat_index) {
-  if (obj == nullptr || IsInBootImage(obj)) {
+  if (obj == nullptr || !IsImageObject(obj)) {
     // Object is null or already in the image, there is no work to do.
     return obj;
   }
@@ -1422,7 +1990,7 @@
       mirror::Class* as_klass = obj->AsClass();
       mirror::DexCache* dex_cache = as_klass->GetDexCache();
       DCHECK(!as_klass->IsErroneous()) << as_klass->GetStatus();
-      if (compile_app_image_) {
+      if (compiler_options_.IsAppImage()) {
         // Extra sanity, no boot loader classes should be left!
         CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
       }
@@ -1433,7 +2001,7 @@
       // belongs.
       oat_index = GetOatIndexForDexCache(dex_cache);
       ImageInfo& image_info = GetImageInfo(oat_index);
-      if (!compile_app_image_) {
+      if (!compiler_options_.IsAppImage()) {
         // Note: Avoid locking to prevent lock order violations from root visiting;
         // image_info.class_table_ is only accessed from the image writer.
         image_info.class_table_->InsertWithoutLocks(as_klass);
@@ -1532,18 +2100,17 @@
       }
     } else if (obj->IsClassLoader()) {
       // Register the class loader if it has a class table.
-      // The fake boot class loader should not get registered and we should end up with only one
-      // class loader.
+      // The fake boot class loader should not get registered.
       mirror::ClassLoader* class_loader = obj->AsClassLoader();
       if (class_loader->GetClassTable() != nullptr) {
-        DCHECK(compile_app_image_);
-        DCHECK(class_loaders_.empty());
-        class_loaders_.insert(class_loader);
-        ImageInfo& image_info = GetImageInfo(oat_index);
-        // Note: Avoid locking to prevent lock order violations from root visiting;
-        // image_info.class_table_ table is only accessed from the image writer
-        // and class_loader->GetClassTable() is iterated but not modified.
-        image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
+        DCHECK(compiler_options_.IsAppImage());
+        if (class_loader == GetAppClassLoader()) {
+          ImageInfo& image_info = GetImageInfo(oat_index);
+          // Note: Avoid locking to prevent lock order violations from root visiting;
+          // image_info.class_table_ table is only accessed from the image writer
+          // and class_loader->GetClassTable() is iterated but not modified.
+          image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
+        }
       }
     }
     AssignImageBinSlot(obj, oat_index);
@@ -1714,7 +2281,7 @@
   Runtime* const runtime = Runtime::Current();
   VariableSizedHandleScope handles(self);
   MutableHandle<ObjectArray<Object>> boot_image_live_objects = handles.NewHandle(
-      compile_app_image_
+      compiler_options_.IsAppImage()
           ? nullptr
           : IntrinsicObjects::AllocateBootImageLiveObjects(self, runtime->GetClassLinker()));
   std::vector<Handle<ObjectArray<Object>>> image_roots;
@@ -1749,7 +2316,8 @@
   for (auto* m : image_methods_) {
     CHECK(m != nullptr);
     CHECK(m->IsRuntimeMethod());
-    DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
+    DCHECK_EQ(!compiler_options_.IsBootImage(), IsInBootImage(m))
+        << "Trampolines should be in boot image";
     if (!IsInBootImage(m)) {
       AssignMethodOffset(m, NativeObjectRelocationType::kRuntimeMethod, GetDefaultOatIndex());
     }
@@ -1798,10 +2366,10 @@
   }
   ProcessWorkStack(&work_stack);
 
-  // For app images, there may be objects that are only held live by the by the boot image. One
+  // For app images, there may be objects that are only held live by the boot image. One
   // example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
-  // does not fail any checks. TODO: We should probably avoid copying these objects.
-  if (compile_app_image_) {
+  // does not fail any checks.
+  if (compiler_options_.IsAppImage()) {
     for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
       DCHECK(space->IsImageSpace());
       gc::accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -1820,17 +2388,15 @@
     ProcessWorkStack(&work_stack);
 
     // Store the class loader in the class roots.
-    CHECK_EQ(class_loaders_.size(), 1u);
     CHECK_EQ(image_roots.size(), 1u);
-    CHECK(*class_loaders_.begin() != nullptr);
-    image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, *class_loaders_.begin());
+    image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, GetAppClassLoader());
   }
 
   // Verify that all objects have assigned image bin slots.
   {
     auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+      if (IsImageObject(obj)) {
         CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
       }
     };
@@ -1858,10 +2424,33 @@
   }
 
   // Calculate bin slot offsets.
-  for (ImageInfo& image_info : image_infos_) {
+  for (size_t oat_index = 0; oat_index < image_infos_.size(); ++oat_index) {
+    ImageInfo& image_info = image_infos_[oat_index];
     size_t bin_offset = image_objects_offset_begin_;
+    // Need to visit the objects in bin order since alignment requirements might change the
+    // section sizes.
+    // Avoid using ObjPtr since VisitObjects invalidates. This is safe since concurrent GC can not
+    // occur during image writing.
+    using BinPair = std::pair<BinSlot, mirror::Object*>;
+    std::vector<BinPair> objects;
+    heap->VisitObjects([&](mirror::Object* obj)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      // Only visit the oat index for the current image.
+      if (IsImageObject(obj) && GetOatIndex(obj) == oat_index) {
+        objects.emplace_back(GetImageBinSlot(obj), obj);
+      }
+    });
+    std::sort(objects.begin(), objects.end(), [](const BinPair& a, const BinPair& b) -> bool {
+      if (a.first.GetBin() != b.first.GetBin()) {
+        return a.first.GetBin() < b.first.GetBin();
+      }
+      // Note that the index is really the relative offset in this case.
+      return a.first.GetIndex() < b.first.GetIndex();
+    });
+    auto it = objects.begin();
     for (size_t i = 0; i != kNumberOfBins; ++i) {
-      switch (static_cast<Bin>(i)) {
+      Bin bin = enum_cast<Bin>(i);
+      switch (bin) {
         case Bin::kArtMethodClean:
         case Bin::kArtMethodDirty: {
           bin_offset = RoundUp(bin_offset, method_alignment);
@@ -1880,36 +2469,59 @@
         }
       }
       image_info.bin_slot_offsets_[i] = bin_offset;
-      bin_offset += image_info.bin_slot_sizes_[i];
+
+      // If the bin is for mirror objects, assign the offsets since we may need to change sizes
+      // from alignment requirements.
+      if (i < static_cast<size_t>(Bin::kMirrorCount)) {
+        const size_t start_offset = bin_offset;
+        // Visit and assign offsets for all objects of the bin type.
+        while (it != objects.end() && it->first.GetBin() == bin) {
+          ObjPtr<mirror::Object> obj(it->second);
+          const size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
+          // If the object spans region boundaries, add padding objects between.
+          // TODO: Instead of adding padding, we should consider reordering the bins to reduce
+          // wasted space.
+          if (region_size_ != 0u) {
+            const size_t offset_after_header = bin_offset - sizeof(ImageHeader);
+            const size_t next_region = RoundUp(offset_after_header, region_size_);
+            if (offset_after_header != next_region &&
+                offset_after_header + object_size > next_region) {
+              // Add padding objects until aligned.
+              while (bin_offset - sizeof(ImageHeader) < next_region) {
+                image_info.padding_object_offsets_.push_back(bin_offset);
+                bin_offset += kObjectAlignment;
+                region_alignment_wasted_ += kObjectAlignment;
+                image_info.image_end_ += kObjectAlignment;
+              }
+              CHECK_EQ(bin_offset - sizeof(ImageHeader), next_region);
+            }
+          }
+          SetImageOffset(obj.Ptr(), bin_offset);
+          bin_offset = bin_offset + object_size;
+          ++it;
+        }
+        image_info.bin_slot_sizes_[i] = bin_offset - start_offset;
+      } else {
+        bin_offset += image_info.bin_slot_sizes_[i];
+      }
     }
     // NOTE: There may be additional padding between the bin slots and the intern table.
     DCHECK_EQ(image_info.image_end_,
               image_info.GetBinSizeSum(Bin::kMirrorCount) + image_objects_offset_begin_);
   }
 
+  VLOG(image) << "Space wasted for region alignment " << region_alignment_wasted_;
+
   // Calculate image offsets.
   size_t image_offset = 0;
   for (ImageInfo& image_info : image_infos_) {
     image_info.image_begin_ = global_image_begin_ + image_offset;
     image_info.image_offset_ = image_offset;
-    ImageSection unused_sections[ImageHeader::kSectionCount];
-    image_info.image_size_ =
-        RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
+    image_info.image_size_ = RoundUp(image_info.CreateImageSections().first, kPageSize);
     // There should be no gaps until the next image.
     image_offset += image_info.image_size_;
   }
 
-  // Transform each object's bin slot into an offset which will be used to do the final copy.
-  {
-    auto unbin_objects_into_offset = [&](mirror::Object* obj)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!IsInBootImage(obj)) {
-        UnbinObjectsIntoOffset(obj);
-      }
-    };
-    heap->VisitObjects(unbin_objects_into_offset);
-  }
-
   size_t i = 0;
   for (ImageInfo& image_info : image_infos_) {
     image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get()));
@@ -1928,58 +2540,112 @@
   boot_image_live_objects_ = boot_image_live_objects.Get();
 }
 
-size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
-  DCHECK(out_sections != nullptr);
+std::pair<size_t, std::vector<ImageSection>> ImageWriter::ImageInfo::CreateImageSections() const {
+  std::vector<ImageSection> sections(ImageHeader::kSectionCount);
 
-  // Do not round up any sections here that are represented by the bins since it will break
-  // offsets.
+  // Do not round up any sections here that are represented by the bins since it
+  // will break offsets.
 
-  // Objects section
-  ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
-  *objects_section = ImageSection(0u, image_end_);
+  /*
+   * Objects section
+   */
+  sections[ImageHeader::kSectionObjects] =
+      ImageSection(0u, image_end_);
 
-  // Add field section.
-  ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
-  *field_section = ImageSection(GetBinSlotOffset(Bin::kArtField), GetBinSlotSize(Bin::kArtField));
+  /*
+   * Field section
+   */
+  sections[ImageHeader::kSectionArtFields] =
+      ImageSection(GetBinSlotOffset(Bin::kArtField), GetBinSlotSize(Bin::kArtField));
 
-  // Add method section.
-  ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
-  *methods_section = ImageSection(
-      GetBinSlotOffset(Bin::kArtMethodClean),
-      GetBinSlotSize(Bin::kArtMethodClean) + GetBinSlotSize(Bin::kArtMethodDirty));
+  /*
+   * Method section
+   */
+  sections[ImageHeader::kSectionArtMethods] =
+      ImageSection(GetBinSlotOffset(Bin::kArtMethodClean),
+                   GetBinSlotSize(Bin::kArtMethodClean) +
+                   GetBinSlotSize(Bin::kArtMethodDirty));
 
-  // IMT section.
-  ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
-  *imt_section = ImageSection(GetBinSlotOffset(Bin::kImTable), GetBinSlotSize(Bin::kImTable));
+  /*
+   * IMT section
+   */
+  sections[ImageHeader::kSectionImTables] =
+      ImageSection(GetBinSlotOffset(Bin::kImTable), GetBinSlotSize(Bin::kImTable));
 
-  // Conflict tables section.
-  ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
-  *imt_conflict_tables_section = ImageSection(GetBinSlotOffset(Bin::kIMTConflictTable),
-                                              GetBinSlotSize(Bin::kIMTConflictTable));
+  /*
+   * Conflict Tables section
+   */
+  sections[ImageHeader::kSectionIMTConflictTables] =
+      ImageSection(GetBinSlotOffset(Bin::kIMTConflictTable), GetBinSlotSize(Bin::kIMTConflictTable));
 
-  // Runtime methods section.
-  ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
-  *runtime_methods_section = ImageSection(GetBinSlotOffset(Bin::kRuntimeMethod),
-                                          GetBinSlotSize(Bin::kRuntimeMethod));
+  /*
+   * Runtime Methods section
+   */
+  sections[ImageHeader::kSectionRuntimeMethods] =
+      ImageSection(GetBinSlotOffset(Bin::kRuntimeMethod), GetBinSlotSize(Bin::kRuntimeMethod));
 
-  // Add dex cache arrays section.
-  ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
-  *dex_cache_arrays_section = ImageSection(GetBinSlotOffset(Bin::kDexCacheArray),
-                                           GetBinSlotSize(Bin::kDexCacheArray));
+  /*
+   * DexCache Arrays section.
+   */
+  const ImageSection& dex_cache_arrays_section =
+      sections[ImageHeader::kSectionDexCacheArrays] =
+          ImageSection(GetBinSlotOffset(Bin::kDexCacheArray),
+                       GetBinSlotSize(Bin::kDexCacheArray));
+
+  /*
+   * Interned Strings section
+   */
+
   // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
-  size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
-  // Calculate the size of the interned strings.
-  ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
-  *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
-  cur_pos = interned_strings_section->End();
-  // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
-  cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
-  // Calculate the size of the class table section.
-  ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
-  *class_table_section = ImageSection(cur_pos, class_table_bytes_);
-  cur_pos = class_table_section->End();
-  // Image end goes right before the start of the image bitmap.
-  return cur_pos;
+  size_t cur_pos = RoundUp(dex_cache_arrays_section.End(), sizeof(uint64_t));
+
+  const ImageSection& interned_strings_section =
+      sections[ImageHeader::kSectionInternedStrings] =
+          ImageSection(cur_pos, intern_table_bytes_);
+
+  /*
+   * Class Table section
+   */
+
+  // Obtain the new position and round it up to the appropriate alignment.
+  cur_pos = RoundUp(interned_strings_section.End(), sizeof(uint64_t));
+
+  const ImageSection& class_table_section =
+      sections[ImageHeader::kSectionClassTable] =
+          ImageSection(cur_pos, class_table_bytes_);
+
+  /*
+   * String Field Offsets section
+   */
+
+  // Round up to the alignment of the offsets we are going to store.
+  cur_pos = RoundUp(class_table_section.End(), sizeof(uint32_t));
+
+  // The size of string_reference_offsets_ can't be used here because it hasn't
+  // been filled with AppImageReferenceOffsetInfo objects yet.  The
+  // num_string_references_ value is calculated separately, before we can
+  // compute the actual offsets.
+  const ImageSection& string_reference_offsets =
+      sections[ImageHeader::kSectionStringReferenceOffsets] =
+          ImageSection(cur_pos,
+                       sizeof(typename decltype(string_reference_offsets_)::value_type) *
+                           num_string_references_);
+
+  /*
+   * Metadata section.
+   */
+
+  // Round up to the alignment of the offsets we are going to store.
+  cur_pos = RoundUp(string_reference_offsets.End(),
+                    mirror::DexCache::PreResolvedStringsAlignment());
+
+  const ImageSection& metadata_section =
+      sections[ImageHeader::kSectionMetadata] =
+          ImageSection(cur_pos, GetBinSlotSize(Bin::kMetadata));
+
+  // Return the number of bytes described by these sections, and the sections
+  // themselves.
+  return make_pair(metadata_section.End(), std::move(sections));
 }
 
 void ImageWriter::CreateHeader(size_t oat_index) {
@@ -1988,16 +2654,32 @@
   const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
   const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
 
+  uint32_t image_reservation_size = image_info.image_size_;
+  DCHECK_ALIGNED(image_reservation_size, kPageSize);
+  uint32_t component_count = 1u;
+  if (!compiler_options_.IsAppImage()) {
+    if (oat_index == 0u) {
+      const ImageInfo& last_info = image_infos_.back();
+      const uint8_t* end = last_info.oat_file_begin_ + last_info.oat_loaded_size_;
+      DCHECK_ALIGNED(image_info.image_begin_, kPageSize);
+      image_reservation_size =
+          dchecked_integral_cast<uint32_t>(RoundUp(end - image_info.image_begin_, kPageSize));
+      component_count = image_infos_.size();
+    } else {
+      image_reservation_size = 0u;
+      component_count = 0u;
+    }
+  }
+
   // Create the image sections.
-  ImageSection sections[ImageHeader::kSectionCount];
-  const size_t image_end = image_info.CreateImageSections(sections);
+  auto section_info_pair = image_info.CreateImageSections();
+  const size_t image_end = section_info_pair.first;
+  std::vector<ImageSection>& sections = section_info_pair.second;
 
   // Finally bitmap section.
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();
   auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
   *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
-  // The relocations section shall be finished later as we do not know its actual size yet.
-
   if (VLOG_IS_ON(compiler)) {
     LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
     size_t idx = 0;
@@ -2024,10 +2706,12 @@
 
   // Create the header, leave 0 for data size since we will fill this in as we are writing the
   // image.
-  ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
+  new (image_info.image_.Begin()) ImageHeader(
+      image_reservation_size,
+      component_count,
       PointerToLowMemUInt32(image_info.image_begin_),
       image_end,
-      sections,
+      sections.data(),
       image_info.image_roots_address_,
       image_info.oat_checksum_,
       PointerToLowMemUInt32(oat_file_begin),
@@ -2035,36 +2719,8 @@
       PointerToLowMemUInt32(oat_data_end),
       PointerToLowMemUInt32(oat_file_end),
       boot_image_begin,
-      boot_image_end - boot_image_begin,
-      boot_oat_begin,
-      boot_oat_end - boot_oat_begin,
-      static_cast<uint32_t>(target_ptr_size_),
-      compile_pic_,
-      /*is_pic*/compile_app_image_,
-      image_storage_mode_,
-      /*data_size*/0u);
-
-  // Resize relocation bitmap for recording reference/pointer relocations.
-  size_t number_of_relocation_locations = RelocationIndex(image_end, target_ptr_size_);
-  DCHECK(image_info.relocation_bitmap_.empty());
-  image_info.relocation_bitmap_.resize(
-      BitsToBytesRoundUp(number_of_relocation_locations * (compile_app_image_ ? 2u : 1u)));
-  // Record header relocations.
-  RecordImageRelocation(&header->image_begin_, oat_index);
-  RecordImageRelocation(&header->oat_file_begin_, oat_index);
-  RecordImageRelocation(&header->oat_data_begin_, oat_index);
-  RecordImageRelocation(&header->oat_data_end_, oat_index);
-  RecordImageRelocation(&header->oat_file_end_, oat_index);
-  if (compile_app_image_) {
-    RecordImageRelocation(&header->boot_image_begin_, oat_index, /* app_to_boot_image */ true);
-    RecordImageRelocation(&header->boot_oat_begin_, oat_index, /* app_to_boot_image */ true);
-  } else {
-    DCHECK_EQ(header->boot_image_begin_, 0u);
-    DCHECK_EQ(header->boot_oat_begin_, 0u);
-  }
-  RecordImageRelocation(&header->image_roots_, oat_index);
-  // Skip non-null check for `patch_delta_` as it is actually 0 but still needs to be recorded.
-  RecordImageRelocation</* kCheckNotNull */ false>(&header->patch_delta_, oat_index);
+      boot_oat_end - boot_image_begin,
+      static_cast<uint32_t>(target_ptr_size_));
 }
 
 ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
@@ -2075,7 +2731,7 @@
 }
 
 const void* ImageWriter::GetIntrinsicReferenceAddress(uint32_t intrinsic_data) {
-  DCHECK(!compile_app_image_);
+  DCHECK(compiler_options_.IsBootImage());
   switch (IntrinsicObjects::DecodePatchType(intrinsic_data)) {
     case IntrinsicObjects::PatchType::kIntegerValueOfArray: {
       const uint8_t* base_address =
@@ -2124,28 +2780,23 @@
   ImageWriter* const image_writer_;
 };
 
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index) {
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
   for (size_t i = 0; i < ImTable::kSize; ++i) {
     ArtMethod* method = orig->Get(i, target_ptr_size_);
     void** address = reinterpret_cast<void**>(copy->AddressOfElement(i, target_ptr_size_));
-    CopyAndFixupPointer(address, method, oat_index);
+    CopyAndFixupPointer(address, method);
     DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method));
   }
 }
 
-void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig,
-                                               ImtConflictTable* copy,
-                                               size_t oat_index) {
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
   const size_t count = orig->NumEntries(target_ptr_size_);
   for (size_t i = 0; i < count; ++i) {
     ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
     ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
-    CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_),
-                        interface_method,
-                        oat_index);
-    CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_),
-                        implementation_method,
-                        oat_index);
+    CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method);
+    CopyAndFixupPointer(
+        copy->AddressOfImplementationMethod(i, target_ptr_size_), implementation_method);
     DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_),
               NativeLocationInImage(interface_method));
     DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_),
@@ -2170,8 +2821,7 @@
         memcpy(dest, pair.first, sizeof(ArtField));
         CopyAndFixupReference(
             reinterpret_cast<ArtField*>(dest)->GetDeclaringClassAddressWithoutBarrier(),
-            reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass(),
-            oat_index);
+            reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass());
         break;
       }
       case NativeObjectRelocationType::kRuntimeMethod:
@@ -2204,15 +2854,20 @@
       case NativeObjectRelocationType::kIMTable: {
         ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
         ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
-        CopyAndFixupImTable(orig_imt, dest_imt, oat_index);
+        CopyAndFixupImTable(orig_imt, dest_imt);
         break;
       }
       case NativeObjectRelocationType::kIMTConflictTable: {
         auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
         CopyAndFixupImtConflictTable(
             orig_table,
-            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_),
-            oat_index);
+            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+        break;
+      }
+      case NativeObjectRelocationType::kGcRootPointer: {
+        auto* orig_pointer = reinterpret_cast<GcRoot<mirror::Object>*>(pair.first);
+        auto* dest_pointer = reinterpret_cast<GcRoot<mirror::Object>*>(dest);
+        CopyAndFixupReference(dest_pointer->AddressWithoutBarrier(), orig_pointer->Read());
         break;
       }
     }
@@ -2222,10 +2877,8 @@
   for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
     ArtMethod* method = image_methods_[i];
     CHECK(method != nullptr);
-    CopyAndFixupPointer(reinterpret_cast<void**>(&image_header->image_methods_[i]),
-                        method,
-                        oat_index,
-                        PointerSize::k32);
+    CopyAndFixupPointer(
+        reinterpret_cast<void**>(&image_header->image_methods_[i]), method, PointerSize::k32);
   }
   FixupRootVisitor root_visitor(this);
 
@@ -2239,20 +2892,19 @@
     CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
     // Fixup the pointers in the newly written intern table to contain image addresses.
     InternTable temp_intern_table;
-    // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
-    // the VisitRoots() will update the memory directly rather than the copies.
+    // Note that we require that ReadFromMemory does not make an internal copy of the elements so
+    // that the VisitRoots() will update the memory directly rather than the copies.
     // This also relies on visit roots not doing any verification which could fail after we update
     // the roots to be the image addresses.
-    temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+    temp_intern_table.AddTableFromMemory(intern_table_memory_ptr,
+                                         VoidFunctor(),
+                                         /*is_boot_image=*/ false);
     CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
     temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
     // Record relocations. (The root visitor does not get to see the slot addresses.)
     MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
     DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
-    DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty());  // Inserted at the beginning.
-    for (const GcRoot<mirror::String>& slot : temp_intern_table.strong_interns_.tables_[0]) {
-      RecordImageRelocation(&slot, oat_index);
-    }
+    DCHECK(!temp_intern_table.strong_interns_.tables_[0].Empty());  // Inserted at the beginning.
   }
   // Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
   // class loaders. Writing multiple class tables into the image is currently unsupported.
@@ -2281,34 +2933,17 @@
     ReaderMutexLock lock(self, temp_class_table.lock_);
     DCHECK(!temp_class_table.classes_.empty());
     DCHECK(!temp_class_table.classes_[0].empty());  // The ClassSet was inserted at the beginning.
-    for (const ClassTable::TableSlot& slot : temp_class_table.classes_[0]) {
-      RecordImageRelocation(&slot, oat_index);
-    }
   }
 }
 
-void ImageWriter::CopyAndFixupObjects() {
-  auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(obj != nullptr);
-    CopyAndFixupObject(obj);
-  };
-  Runtime::Current()->GetHeap()->VisitObjects(visitor);
-  // We no longer need the hashcode map, values have already been copied to target objects.
-  saved_hashcode_map_.clear();
-}
-
 void ImageWriter::FixupPointerArray(mirror::Object* dst,
                                     mirror::PointerArray* arr,
-                                    mirror::Class* klass,
-                                    Bin array_type,
-                                    size_t oat_index) {
-  CHECK(klass->IsArrayClass());
-  CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
+                                    Bin array_type) {
+  CHECK(arr->IsIntArray() || arr->IsLongArray()) << arr->GetClass()->PrettyClass() << " " << arr;
   // Fixup int and long pointers for the ArtMethod or ArtField arrays.
   const size_t num_elements = arr->GetLength();
-  CopyAndFixupReference(dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()),
-                        arr->GetClass(),
-                        oat_index);
+  CopyAndFixupReference(
+      dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()), arr->GetClass());
   auto* dest_array = down_cast<mirror::PointerArray*>(dst);
   for (size_t i = 0, count = num_elements; i < count; ++i) {
     void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
@@ -2330,12 +2965,12 @@
         UNREACHABLE();
       }
     }
-    CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem, oat_index);
+    CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
   }
 }
 
 void ImageWriter::CopyAndFixupObject(Object* obj) {
-  if (IsInBootImage(obj)) {
+  if (!IsImageObject(obj)) {
     return;
   }
   size_t offset = GetImageOffset(obj);
@@ -2348,6 +2983,18 @@
   image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
 
   const size_t n = obj->SizeOf();
+
+  if (kIsDebugBuild && region_size_ != 0u) {
+    const size_t offset_after_header = offset - sizeof(ImageHeader);
+    const size_t next_region = RoundUp(offset_after_header, region_size_);
+    if (offset_after_header != next_region) {
+      // If the object is not on a region boundary, it must not be cross region.
+      CHECK_LT(offset_after_header, next_region)
+          << "offset_after_header=" << offset_after_header << " size=" << n;
+      CHECK_LE(offset_after_header + n, next_region)
+          << "offset_after_header=" << offset_after_header << " size=" << n;
+    }
+  }
   DCHECK_LE(offset + n, image_info.image_.Size());
   memcpy(dst, src, n);
 
@@ -2361,14 +3008,14 @@
     // safe since we mark all of the objects that may reference non immune objects as gray.
     CHECK(dst->AtomicSetMarkBit(0, 1));
   }
-  FixupObject(obj, dst, oat_index);
+  FixupObject(obj, dst);
 }
 
 // Rewrite all the references in the copied object to point to their image address equivalent
 class ImageWriter::FixupVisitor {
  public:
-  FixupVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
-      : image_writer_(image_writer), copy_(copy), oat_index_(oat_index) {
+  FixupVisitor(ImageWriter* image_writer, Object* copy)
+      : image_writer_(image_writer), copy_(copy) {
   }
 
   // Ignore class roots since we don't have a way to map them to the destination. These are handled
@@ -2377,15 +3024,12 @@
       const {}
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
-
   void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
     // Copy the reference and record the fixup if necessary.
     image_writer_->CopyAndFixupReference(
-        copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset),
-        ref.Ptr(),
-        oat_index_);
+        copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset), ref);
   }
 
   // java.lang.ref.Reference visitor.
@@ -2398,13 +3042,31 @@
  protected:
   ImageWriter* const image_writer_;
   mirror::Object* const copy_;
-  size_t oat_index_;
 };
 
+void ImageWriter::CopyAndFixupObjects() {
+  auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(obj != nullptr);
+    CopyAndFixupObject(obj);
+  };
+  Runtime::Current()->GetHeap()->VisitObjects(visitor);
+  // Copy the padding objects since they are required for in-order traversal of the image space.
+  for (const ImageInfo& image_info : image_infos_) {
+    for (const size_t offset : image_info.padding_object_offsets_) {
+      auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
+      dst->SetClass<kVerifyNone>(GetImageAddress(GetClassRoot<mirror::Object>().Ptr()));
+      dst->SetLockWord<kVerifyNone>(LockWord::Default(), /*as_volatile=*/ false);
+      image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
+    }
+  }
+  // We no longer need the hashcode map, values have already been copied to target objects.
+  saved_hashcode_map_.clear();
+}
+
 class ImageWriter::FixupClassVisitor final : public FixupVisitor {
  public:
-  FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
-      : FixupVisitor(image_writer, copy, oat_index) {}
+  FixupClassVisitor(ImageWriter* image_writer, Object* copy)
+      : FixupVisitor(image_writer, copy) {}
 
   void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2460,14 +3122,13 @@
 
 class ImageWriter::NativeLocationVisitor {
  public:
-  NativeLocationVisitor(ImageWriter* image_writer, size_t oat_index)
-      : image_writer_(image_writer),
-        oat_index_(oat_index) {}
+  explicit NativeLocationVisitor(ImageWriter* image_writer)
+      : image_writer_(image_writer) {}
 
   template <typename T>
   T* operator()(T* ptr, void** dest_addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (ptr != nullptr) {
-      image_writer_->CopyAndFixupPointer(dest_addr, ptr, oat_index_);
+      image_writer_->CopyAndFixupPointer(dest_addr, ptr);
     }
     // TODO: The caller shall overwrite the value stored by CopyAndFixupPointer()
     // with the value we return here. We should try to avoid the duplicate work.
@@ -2476,15 +3137,14 @@
 
  private:
   ImageWriter* const image_writer_;
-  const size_t oat_index_;
 };
 
-void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index) {
-  orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this, oat_index));
-  FixupClassVisitor visitor(this, copy, oat_index);
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
+  orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
+  FixupClassVisitor visitor(this, copy);
   ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
 
-  if (kBitstringSubtypeCheckEnabled && compile_app_image_) {
+  if (kBitstringSubtypeCheckEnabled && compiler_options_.IsAppImage()) {
     // When we call SubtypeCheck::EnsureInitialize, it Assigns new bitstring
     // values to the parent of that class.
     //
@@ -2509,39 +3169,37 @@
   copy->SetClinitThreadId(static_cast<pid_t>(0));
 }
 
-void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
+void ImageWriter::FixupObject(Object* orig, Object* copy) {
   DCHECK(orig != nullptr);
   DCHECK(copy != nullptr);
   if (kUseBakerReadBarrier) {
     orig->AssertReadBarrierState();
   }
-  auto* klass = orig->GetClass();
-  if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
+  if (orig->IsIntArray() || orig->IsLongArray()) {
     // Is this a native pointer array?
     auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
     if (it != pointer_arrays_.end()) {
       // Should only need to fixup every pointer array exactly once.
-      FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second, oat_index);
+      FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), it->second);
       pointer_arrays_.erase(it);
       return;
     }
   }
   if (orig->IsClass()) {
-    FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy), oat_index);
+    FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
   } else {
     ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
         Runtime::Current()->GetClassLinker()->GetClassRoots();
+    ObjPtr<mirror::Class> klass = orig->GetClass();
     if (klass == GetClassRoot<mirror::Method>(class_roots) ||
         klass == GetClassRoot<mirror::Constructor>(class_roots)) {
       // Need to go update the ArtMethod.
       auto* dest = down_cast<mirror::Executable*>(copy);
       auto* src = down_cast<mirror::Executable*>(orig);
       ArtMethod* src_method = src->GetArtMethod();
-      CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method, oat_index);
+      CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method);
     } else if (klass == GetClassRoot<mirror::DexCache>(class_roots)) {
-      FixupDexCache(down_cast<mirror::DexCache*>(orig),
-                    down_cast<mirror::DexCache*>(copy),
-                    oat_index);
+      FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
     } else if (klass->IsClassLoaderClass()) {
       mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
       // If src is a ClassLoader, set the class table to null so that it gets recreated by the
@@ -2552,7 +3210,7 @@
       // roots.
       copy_loader->SetAllocator(nullptr);
     }
-    FixupVisitor visitor(this, copy, oat_index);
+    FixupVisitor visitor(this, copy);
     orig->VisitReferences(visitor, visitor);
   }
 }
@@ -2560,8 +3218,7 @@
 template <typename T>
 void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
                                           std::atomic<mirror::DexCachePair<T>>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
+                                          uint32_t array_index) {
   static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
                 "Size check for removing std::atomic<>.");
   mirror::DexCachePair<T>* orig_pair =
@@ -2569,15 +3226,14 @@
   mirror::DexCachePair<T>* new_pair =
       reinterpret_cast<mirror::DexCachePair<T>*>(&new_array[array_index]);
   CopyAndFixupReference(
-      new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read(), oat_index);
+      new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read());
   new_pair->index = orig_pair->index;
 }
 
 template <typename T>
 void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
                                           std::atomic<mirror::NativeDexCachePair<T>>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
+                                          uint32_t array_index) {
   static_assert(
       sizeof(std::atomic<mirror::NativeDexCachePair<T>>) == sizeof(mirror::NativeDexCachePair<T>),
       "Size check for removing std::atomic<>.");
@@ -2588,9 +3244,8 @@
         reinterpret_cast<DexCache::ConversionPair64*>(new_array) + array_index;
     *new_pair = *orig_pair;  // Copy original value and index.
     if (orig_pair->first != 0u) {
-      CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
-                          reinterpret_cast64<void*>(orig_pair->first),
-                          oat_index);
+      CopyAndFixupPointer(
+          reinterpret_cast<void**>(&new_pair->first), reinterpret_cast64<void*>(orig_pair->first));
     }
   } else {
     DexCache::ConversionPair32* orig_pair =
@@ -2599,26 +3254,22 @@
         reinterpret_cast<DexCache::ConversionPair32*>(new_array) + array_index;
     *new_pair = *orig_pair;  // Copy original value and index.
     if (orig_pair->first != 0u) {
-      CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
-                          reinterpret_cast32<void*>(orig_pair->first),
-                          oat_index);
+      CopyAndFixupPointer(
+          reinterpret_cast<void**>(&new_pair->first), reinterpret_cast32<void*>(orig_pair->first));
     }
   }
 }
 
 void ImageWriter::FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
                                           GcRoot<mirror::CallSite>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
-  CopyAndFixupReference(new_array[array_index].AddressWithoutBarrier(),
-                        orig_array[array_index].Read(),
-                        oat_index);
+                                          uint32_t array_index) {
+  CopyAndFixupReference(
+      new_array[array_index].AddressWithoutBarrier(), orig_array[array_index].Read());
 }
 
 template <typename EntryType>
 void ImageWriter::FixupDexCacheArray(DexCache* orig_dex_cache,
                                      DexCache* copy_dex_cache,
-                                     size_t oat_index,
                                      MemberOffset array_offset,
                                      uint32_t size) {
   EntryType* orig_array = orig_dex_cache->GetFieldPtr64<EntryType*>(array_offset);
@@ -2626,47 +3277,45 @@
   if (orig_array != nullptr) {
     // Though the DexCache array fields are usually treated as native pointers, we clear
     // the top 32 bits for 32-bit targets.
-    CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, oat_index, PointerSize::k64);
+    CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, PointerSize::k64);
     EntryType* new_array = NativeCopyLocation(orig_array);
     for (uint32_t i = 0; i != size; ++i) {
-      FixupDexCacheArrayEntry(orig_array, new_array, i, oat_index);
+      FixupDexCacheArrayEntry(orig_array, new_array, i);
     }
   }
 }
 
-void ImageWriter::FixupDexCache(DexCache* orig_dex_cache,
-                                DexCache* copy_dex_cache,
-                                size_t oat_index) {
+void ImageWriter::FixupDexCache(DexCache* orig_dex_cache, DexCache* copy_dex_cache) {
   FixupDexCacheArray<mirror::StringDexCacheType>(orig_dex_cache,
                                                  copy_dex_cache,
-                                                 oat_index,
                                                  DexCache::StringsOffset(),
                                                  orig_dex_cache->NumStrings());
   FixupDexCacheArray<mirror::TypeDexCacheType>(orig_dex_cache,
                                                copy_dex_cache,
-                                               oat_index,
                                                DexCache::ResolvedTypesOffset(),
                                                orig_dex_cache->NumResolvedTypes());
   FixupDexCacheArray<mirror::MethodDexCacheType>(orig_dex_cache,
                                                  copy_dex_cache,
-                                                 oat_index,
                                                  DexCache::ResolvedMethodsOffset(),
                                                  orig_dex_cache->NumResolvedMethods());
   FixupDexCacheArray<mirror::FieldDexCacheType>(orig_dex_cache,
                                                 copy_dex_cache,
-                                                oat_index,
                                                 DexCache::ResolvedFieldsOffset(),
                                                 orig_dex_cache->NumResolvedFields());
   FixupDexCacheArray<mirror::MethodTypeDexCacheType>(orig_dex_cache,
                                                      copy_dex_cache,
-                                                     oat_index,
                                                      DexCache::ResolvedMethodTypesOffset(),
                                                      orig_dex_cache->NumResolvedMethodTypes());
   FixupDexCacheArray<GcRoot<mirror::CallSite>>(orig_dex_cache,
                                                copy_dex_cache,
-                                               oat_index,
                                                DexCache::ResolvedCallSitesOffset(),
                                                orig_dex_cache->NumResolvedCallSites());
+  if (orig_dex_cache->GetPreResolvedStrings() != nullptr) {
+    CopyAndFixupPointer(copy_dex_cache,
+                        DexCache::PreResolvedStringsOffset(),
+                        orig_dex_cache->GetPreResolvedStrings(),
+                        PointerSize::k64);
+  }
 
   // Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving
   // compiler pointers in here will make the output non-deterministic.
@@ -2676,7 +3325,7 @@
 const uint8_t* ImageWriter::GetOatAddress(StubType type) const {
   DCHECK_LE(type, StubType::kLast);
   // If we are compiling an app image, we need to use the stubs of the boot image.
-  if (compile_app_image_) {
+  if (!compiler_options_.IsBootImage()) {
     // Use the current image pointers.
     const std::vector<gc::space::ImageSpace*>& image_spaces =
         Runtime::Current()->GetHeap()->GetBootImageSpaces();
@@ -2773,9 +3422,8 @@
 
   memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
 
-  CopyAndFixupReference(copy->GetDeclaringClassAddressWithoutBarrier(),
-                        orig->GetDeclaringClassUnchecked(),
-                        oat_index);
+  CopyAndFixupReference(
+      copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
 
   // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
   // oat_begin_
@@ -2788,7 +3436,7 @@
     if (orig_table != nullptr) {
       // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
       quick_code = GetOatAddress(StubType::kQuickIMTConflictTrampoline);
-      CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table, oat_index);
+      CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table);
     } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
       quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
     } else {
@@ -2822,9 +3470,6 @@
         // Note this is not the code_ pointer, that is handled above.
         copy->SetEntryPointFromJniPtrSize(
             GetOatAddress(StubType::kJNIDlsymLookup), target_ptr_size_);
-        MemberOffset offset = ArtMethod::EntryPointFromJniOffset(target_ptr_size_);
-        const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
-        RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ compile_app_image_);
       } else {
         CHECK(copy->GetDataPtrSize(target_ptr_size_) == nullptr);
       }
@@ -2832,9 +3477,6 @@
   }
   if (quick_code != nullptr) {
     copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
-    MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(target_ptr_size_);
-    const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
-    RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootOatFile(quick_code));
   }
 }
 
@@ -2887,6 +3529,8 @@
       return Bin::kImTable;
     case NativeObjectRelocationType::kIMTConflictTable:
       return Bin::kIMTConflictTable;
+    case NativeObjectRelocationType::kGcRootPointer:
+      return Bin::kMetadata;
   }
   UNREACHABLE();
 }
@@ -2919,11 +3563,14 @@
                                       size_t oat_loaded_size,
                                       size_t oat_data_offset,
                                       size_t oat_data_size) {
+  DCHECK_GE(oat_loaded_size, oat_data_offset);
+  DCHECK_GE(oat_loaded_size - oat_data_offset, oat_data_size);
+
   const uint8_t* images_end = image_infos_.back().image_begin_ + image_infos_.back().image_size_;
+  DCHECK(images_end != nullptr);  // Image space must be ready.
   for (const ImageInfo& info : image_infos_) {
     DCHECK_LE(info.image_begin_ + info.image_size_, images_end);
   }
-  DCHECK(images_end != nullptr);  // Image space must be ready.
 
   ImageInfo& cur_image_info = GetImageInfo(oat_index);
   cur_image_info.oat_file_begin_ = images_end + cur_image_info.oat_offset_;
@@ -2931,7 +3578,7 @@
   cur_image_info.oat_data_begin_ = cur_image_info.oat_file_begin_ + oat_data_offset;
   cur_image_info.oat_size_ = oat_data_size;
 
-  if (compile_app_image_) {
+  if (compiler_options_.IsAppImage()) {
     CHECK_EQ(oat_filenames_.size(), 1u) << "App image should have no next image.";
     return;
   }
@@ -2970,85 +3617,49 @@
 ImageWriter::ImageWriter(
     const CompilerOptions& compiler_options,
     uintptr_t image_begin,
-    bool compile_pic,
-    bool compile_app_image,
     ImageHeader::StorageMode image_storage_mode,
-    const std::vector<const char*>& oat_filenames,
+    const std::vector<std::string>& oat_filenames,
     const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
+    jobject class_loader,
     const HashSet<std::string>* dirty_image_objects)
     : compiler_options_(compiler_options),
       global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
       image_objects_offset_begin_(0),
-      compile_pic_(compile_pic),
-      compile_app_image_(compile_app_image),
       target_ptr_size_(InstructionSetPointerSize(compiler_options.GetInstructionSet())),
       image_infos_(oat_filenames.size()),
       dirty_methods_(0u),
       clean_methods_(0u),
+      app_class_loader_(class_loader),
       boot_image_live_objects_(nullptr),
       image_storage_mode_(image_storage_mode),
       oat_filenames_(oat_filenames),
       dex_file_oat_index_map_(dex_file_oat_index_map),
       dirty_image_objects_(dirty_image_objects) {
+  DCHECK(compiler_options.IsBootImage() || compiler_options.IsAppImage());
   CHECK_NE(image_begin, 0U);
   std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
-  CHECK_EQ(compile_app_image, !Runtime::Current()->GetHeap()->GetBootImageSpaces().empty())
+  CHECK_EQ(compiler_options.IsBootImage(),
+           Runtime::Current()->GetHeap()->GetBootImageSpaces().empty())
       << "Compiling a boot image should occur iff there are no boot image spaces loaded";
+  if (compiler_options_.IsAppImage()) {
+    // Make sure objects are not crossing region boundaries for app images.
+    region_size_ = gc::space::RegionSpace::kRegionSize;
+  }
 }
 
 ImageWriter::ImageInfo::ImageInfo()
     : intern_table_(new InternTable),
       class_table_(new ClassTable) {}
 
-template <bool kCheckNotNull /* = true */>
-void ImageWriter::RecordImageRelocation(const void* dest,
-                                        size_t oat_index,
-                                        bool app_to_boot_image /* = false */) {
-  // Check that we're not recording a relocation for null.
-  if (kCheckNotNull) {
-    DCHECK(reinterpret_cast<const uint32_t*>(dest)[0] != 0u);
-  }
-  // Calculate the offset within the image.
-  ImageInfo* image_info = &image_infos_[oat_index];
-  DCHECK(image_info->image_.HasAddress(dest))
-      << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
-      << "-" << static_cast<const void*>(image_info->image_.End())
-      << " does not contain " << dest;
-  size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
-  ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
-  size_t image_end = image_header->GetClassTableSection().End();
-  DCHECK_LT(offset, image_end);
-  // Calculate the location index.
-  size_t size = RelocationIndex(image_end, target_ptr_size_);
-  size_t index = RelocationIndex(offset, target_ptr_size_);
-  if (app_to_boot_image) {
-    index += size;
-  }
-  // Mark the location in the bitmap.
-  DCHECK(compile_app_image_ || !app_to_boot_image);
-  MemoryRegion region(image_info->relocation_bitmap_.data(), image_info->relocation_bitmap_.size());
-  BitMemoryRegion bit_region(region, /* bit_offset */ 0u, compile_app_image_ ? 2u * size : size);
-  DCHECK(!bit_region.LoadBit(index));
-  bit_region.StoreBit(index, /* value*/ true);
-}
-
 template <typename DestType>
-void ImageWriter::CopyAndFixupReference(DestType* dest,
-                                        ObjPtr<mirror::Object> src,
-                                        size_t oat_index) {
+void ImageWriter::CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src) {
   static_assert(std::is_same<DestType, mirror::CompressedReference<mirror::Object>>::value ||
                     std::is_same<DestType, mirror::HeapReference<mirror::Object>>::value,
                 "DestType must be a Compressed-/HeapReference<Object>.");
   dest->Assign(GetImageAddress(src.Ptr()));
-  if (src != nullptr) {
-    RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootImage(src.Ptr()));
-  }
 }
 
-void ImageWriter::CopyAndFixupPointer(void** target,
-                                      void* value,
-                                      size_t oat_index,
-                                      PointerSize pointer_size) {
+void ImageWriter::CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size) {
   void* new_value = NativeLocationInImage(value);
   if (pointer_size == PointerSize::k32) {
     *reinterpret_cast<uint32_t*>(target) = reinterpret_cast32<uint32_t>(new_value);
@@ -3056,24 +3667,22 @@
     *reinterpret_cast<uint64_t*>(target) = reinterpret_cast64<uint64_t>(new_value);
   }
   DCHECK(value != nullptr);
-  RecordImageRelocation(target, oat_index, /* app_to_boot_image */ IsInBootImage(value));
 }
 
-void ImageWriter::CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+void ImageWriter::CopyAndFixupPointer(void** target, void* value)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  CopyAndFixupPointer(target, value, oat_index, target_ptr_size_);
+  CopyAndFixupPointer(target, value, target_ptr_size_);
 }
 
 void ImageWriter::CopyAndFixupPointer(
-    void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size) {
+    void* object, MemberOffset offset, void* value, PointerSize pointer_size) {
   void** target =
       reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(object) + offset.Uint32Value());
-  return CopyAndFixupPointer(target, value, oat_index, pointer_size);
+  return CopyAndFixupPointer(target, value, pointer_size);
 }
 
-void ImageWriter::CopyAndFixupPointer(
-    void* object, MemberOffset offset, void* value, size_t oat_index) {
-  return CopyAndFixupPointer(object, offset, value, oat_index, target_ptr_size_);
+void ImageWriter::CopyAndFixupPointer(void* object, MemberOffset offset, void* value) {
+  return CopyAndFixupPointer(object, offset, value, target_ptr_size_);
 }
 
 }  // namespace linker
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index e45023e..8896e07 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -26,6 +26,7 @@
 #include <set>
 #include <stack>
 #include <string>
+#include <unordered_map>
 
 #include "art_method.h"
 #include "base/bit_utils.h"
@@ -77,13 +78,28 @@
  public:
   ImageWriter(const CompilerOptions& compiler_options,
               uintptr_t image_begin,
-              bool compile_pic,
-              bool compile_app_image,
               ImageHeader::StorageMode image_storage_mode,
-              const std::vector<const char*>& oat_filenames,
+              const std::vector<std::string>& oat_filenames,
               const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
+              jobject class_loader,
               const HashSet<std::string>* dirty_image_objects);
 
+  /*
+   * Modifies the heap and collects information about objects and code so that
+   * they can be written to the boot or app image later.
+   *
+   * First, unneeded classes are removed from the managed heap.  Next, we
+   * remove cached values and calculate necessary metadata for later in the
+   * process. Optionally some debugging information is collected and used to
+   * verify the state of the heap at this point.  Next, metadata from earlier
+   * is used to calculate offsets of references to strings to speed up string
+   * interning when the image is loaded.  Lastly, we allocate enough memory to
+   * fit all image data minus the bitmap and relocation sections.
+   *
+   * This function should only be called when all objects to be included in the
+   * image have been initialized and all native methods have been generated.  In
+   * addition, no other thread should be modifying the heap.
+   */
   bool PrepareImageAddressSpace(TimingLogger* timings);
 
   bool IsImageAddressSpaceReady() const {
@@ -96,10 +112,7 @@
     return true;
   }
 
-  ObjPtr<mirror::ClassLoader> GetClassLoader() {
-    CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
-    return compile_app_image_ ? *class_loaders_.begin() : nullptr;
-  }
+  ObjPtr<mirror::ClassLoader> GetAppClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <typename T>
   T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -129,8 +142,8 @@
   // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
   // the names in oat_filenames.
   bool Write(int image_fd,
-             const std::vector<const char*>& image_filenames,
-             const std::vector<const char*>& oat_filenames)
+             const std::vector<std::string>& image_filenames,
+             const std::vector<std::string>& oat_filenames)
       REQUIRES(!Locks::mutator_lock_);
 
   uintptr_t GetOatDataBegin(size_t oat_index) {
@@ -193,6 +206,8 @@
     kIMTConflictTable,
     // Runtime methods (always clean, do not have a length prefix array).
     kRuntimeMethod,
+    // Metadata bin for data that is temporary during image lifetime.
+    kMetadata,
     // Dex cache arrays have a special slot for PC-relative addressing. Since they are
     // huge, and as such their dirtiness is not important for the clean/dirty separation,
     // we arbitrarily keep them at the end of the native data.
@@ -210,6 +225,7 @@
     kArtMethodArrayClean,
     kArtMethodDirty,
     kArtMethodArrayDirty,
+    kGcRootPointer,
     kRuntimeMethod,
     kIMTable,
     kIMTConflictTable,
@@ -263,16 +279,24 @@
 
    private:
     // Must be the same size as LockWord, any larger and we would truncate the data.
-    const uint32_t lockword_;
+    uint32_t lockword_;
   };
 
   struct ImageInfo {
     ImageInfo();
     ImageInfo(ImageInfo&&) = default;
 
-    // Create the image sections into the out sections variable, returns the size of the image
-    // excluding the bitmap.
-    size_t CreateImageSections(ImageSection* out_sections) const;
+    /*
+     * Creates ImageSection objects that describe most of the sections of a
+     * boot or AppImage. The following sections are not included:
+     *   - ImageHeader::kSectionImageBitmap
+     *
+     * In addition, the ImageHeader is not covered here.
+     *
+     * This function will return the total size of the covered sections as well
+     * as a vector containing the individual ImageSection objects.
+     */
+    std::pair<size_t, std::vector<ImageSection>> CreateImageSections() const;
 
     size_t GetStubOffset(StubType stub_type) const {
       DCHECK_LT(static_cast<size_t>(stub_type), kNumberOfStubTypes);
@@ -364,17 +388,18 @@
     // Number of pointer fixup bytes.
     size_t pointer_fixup_bytes_ = 0;
 
+    // Number of offsets to string references that will be written to the
+    // StringFieldOffsets section.
+    size_t num_string_references_ = 0;
+
     // Intern table associated with this image for serialization.
     std::unique_ptr<InternTable> intern_table_;
 
     // Class table associated with this image for serialization.
     std::unique_ptr<ClassTable> class_table_;
 
-    // Relocations of references/pointers. For boot image, it contains one bit
-    // for each location that can be relocated. For app image, it contains twice
-    // that many bits, first half contains relocations within this image and the
-    // second half contains relocations for references to the boot image.
-    std::vector<uint8_t> relocation_bitmap_;
+    // Padding objects to ensure region alignment (if required).
+    std::vector<size_t> padding_object_offsets_;
   };
 
   // We use the lock word to store the offset of the object in the image.
@@ -429,19 +454,24 @@
   // Debug aid that list of requested image classes.
   void DumpImageClasses();
 
-  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
-  void ComputeLazyFieldsForImageClasses()
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Visit all class loaders.
   void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Remove unwanted classes from various roots.
   void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
-  void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
-                               ObjPtr<mirror::ClassLoader> class_loader)
+  // Remove unwanted classes from the DexCache roots.
+  void PruneDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::classlinker_classes_lock_);
+
+  // Preload deterministic DexCache contents.
+  void PreloadDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::classlinker_classes_lock_);
+
+  // Find dex caches for pruning or preloading.
+  std::vector<ObjPtr<mirror::DexCache>> FindDexCaches(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::classlinker_classes_lock_);
 
@@ -472,51 +502,55 @@
   void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, size_t oat_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index)
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupImtConflictTable(ImtConflictTable* orig,
-                                    ImtConflictTable* copy,
-                                    size_t oat_index)
+  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  template <bool kCheckNotNull = true>
-  void RecordImageRelocation(const void* dest, size_t oat_index, bool app_to_boot_image = false);
-  void FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index)
+
+  /*
+   * Copies metadata from the heap into a buffer that will be compressed and
+   * written to the image.
+   *
+   * This function copies the string offset metadata from a local vector to an
+   * offset inside the image_ field of an ImageInfo struct.  The offset into the
+   * memory pointed to by the image_ field is obtained from the ImageSection
+   * object for the String Offsets section.
+   *
+   * All data for the image, besides the object bitmap and the relocation data,
+   * will also be copied into the memory region pointed to by image_.
+   */
+  void CopyMetadata();
+
+  void FixupClass(mirror::Class* orig, mirror::Class* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void FixupObject(mirror::Object* orig, mirror::Object* copy, size_t oat_index)
+  void FixupObject(mirror::Object* orig, mirror::Object* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename T>
   void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
                                std::atomic<mirror::DexCachePair<T>>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename T>
   void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
                                std::atomic<mirror::NativeDexCachePair<T>>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
                                GcRoot<mirror::CallSite>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename EntryType>
   void FixupDexCacheArray(mirror::DexCache* orig_dex_cache,
                           mirror::DexCache* copy_dex_cache,
-                          size_t oat_index,
                           MemberOffset array_offset,
                           uint32_t size)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCache(mirror::DexCache* orig_dex_cache,
-                     mirror::DexCache* copy_dex_cache,
-                     size_t oat_index)
+                     mirror::DexCache* copy_dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupPointerArray(mirror::Object* dst,
                          mirror::PointerArray* arr,
-                         mirror::Class* klass,
-                         Bin array_type,
-                         size_t oat_index)
+                         Bin array_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
@@ -558,6 +592,63 @@
                                   std::unordered_set<mirror::Object*>* visited)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  /*
+   * This type holds the information necessary for calculating
+   * AppImageReferenceOffsetInfo values after the object relocations have been
+   * computed.
+   *
+   * The first element will always be a pointer to a managed object.  If the
+   * pointer has been tagged (testable with HasDexCacheNativeRefTag) it
+   * indicates that the referenced object is a DexCache object that requires
+   * special handling during loading and the second element has no meaningful
+   * value.  If the pointer isn't tagged then the second element is an
+   * object-relative offset to a field containing a string reference.
+   *
+   * Note that it is possible for an untagged DexCache pointer to occur in the
+   * first position if it has a managed reference that needs to be updated.
+   *
+   * TODO (chriswailes): Add a note indicating the source line where we ensure
+   * that no moving garbage collection will occur.
+   *
+   * TODO (chriswailes): Replace with std::variant once ART is building with
+   * C++17
+   */
+  typedef std::pair<uintptr_t, uint32_t> HeapReferencePointerInfo;
+
+  /*
+   * Collects the info necessary for calculating image offsets to string field
+   * later.
+   *
+   * This function is used when constructing AppImages.  Because AppImages
+   * contain strings that must be interned we need to visit references to these
+   * strings when the AppImage is loaded and either insert them into the
+   * runtime intern table or replace the existing reference with a reference
+   * to the interned strings.
+   *
+   * To speed up the interning of strings when the AppImage is loaded we include
+   * a list of offsets to string references in the AppImage.  These are then
+   * iterated over at load time and fixed up.
+   *
+   * To record the offsets we first have to count the number of string
+   * references that will be included in the AppImage.  This allows us to both
+   * allocate enough memory for storing the offsets and correctly calculate the
+   * offsets of various objects into the image.  Once the image offset
+   * calculations are done for managed objects the reference object/offset pairs
+   * are translated to image offsets.  The CopyMetadata function then copies
+   * these offsets into the image.
+   */
+  std::vector<HeapReferencePointerInfo> CollectStringReferenceInfo() const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  /*
+   * Ensures that assumptions about native GC roots and AppImages hold.
+   *
+   * This function verifies the following condition(s):
+   *   - Native references to managed strings are only reachable through DexCache
+   *     objects
+   */
+  void VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+
   bool IsMultiImage() const {
     return image_infos_.size() > 1;
   }
@@ -586,7 +677,12 @@
   template <typename T>
   T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true of obj is inside of the boot image space. This may only return true if we are
+  // Return true if `obj` belongs to the image we're writing.
+  // For a boot image, this is true for all objects.
+  // For an app image, boot image objects and boot class path dex caches are excluded.
+  bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Return true if `obj` is inside of the boot image space. This may only return true if we are
   // compiling an app image.
   bool IsInBootImage(const void* obj) const;
 
@@ -618,18 +714,30 @@
 
   // Copy a reference and record image relocation.
   template <typename DestType>
-  void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src, size_t oat_index)
+  void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy a native pointer and record image relocation.
-  void CopyAndFixupPointer(void** target, void* value, size_t oat_index, PointerSize pointer_size)
+  void CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+  void CopyAndFixupPointer(void** target, void* value)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupPointer(
-      void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size)
+      void* object, MemberOffset offset, void* value, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupPointer(void* object, MemberOffset offset, void* value, size_t oat_index)
+  void CopyAndFixupPointer(void* object, MemberOffset offset, void* value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  /*
+   * Tests an object to see if it will be contained in an AppImage.
+   *
+   * An object reference is considered to be an AppImage String reference iff:
+   *   - It isn't null
+   *   - The referred-object isn't in the boot image
+   *   - The referred-object is a Java String
+   */
+  ALWAYS_INLINE
+  bool IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const CompilerOptions& compiler_options_;
@@ -651,10 +759,6 @@
   // Oat index map for objects.
   std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
 
-  // Boolean flags.
-  const bool compile_pic_;
-  const bool compile_app_image_;
-
   // Size of pointers on the target architecture.
   PointerSize target_ptr_size_;
 
@@ -676,19 +780,20 @@
   // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
   std::unordered_map<mirror::Class*, bool> prune_class_memo_;
 
-  // Class loaders with a class table to write out. There should only be one class loader because
-  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
-  // null is a valid entry.
-  std::unordered_set<mirror::ClassLoader*> class_loaders_;
+  // The application class loader. Null for boot image.
+  jobject app_class_loader_;
 
   // Boot image live objects, null for app image.
   mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
 
+  // Offsets into the image that indicate where string references are recorded.
+  std::vector<AppImageReferenceOffsetInfo> string_reference_offsets_;
+
   // Which mode the image is stored as, see image.h
   const ImageHeader::StorageMode image_storage_mode_;
 
   // The file names of oat files.
-  const std::vector<const char*>& oat_filenames_;
+  const std::vector<std::string>& oat_filenames_;
 
   // Map of dex files to the indexes of oat files that they were compiled into.
   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
@@ -696,7 +801,13 @@
   // Set of objects known to be dirty in the image. Can be nullptr if there are none.
   const HashSet<std::string>* dirty_image_objects_;
 
-  class ComputeLazyFieldsForClassesVisitor;
+  // Objects are guaranteed to not cross the region size boundary.
+  size_t region_size_ = 0u;
+
+  // Region alignment bytes wasted.
+  size_t region_alignment_wasted_ = 0u;
+
+  class ImageFileGuard;
   class FixupClassVisitor;
   class FixupRootVisitor;
   class FixupVisitor;
@@ -704,9 +815,23 @@
   class NativeLocationVisitor;
   class PruneClassesVisitor;
   class PruneClassLoaderClassesVisitor;
+  class PruneObjectReferenceVisitor;
   class RegisterBootClassPathClassesVisitor;
   class VisitReferencesVisitor;
-  class PruneObjectReferenceVisitor;
+
+  /*
+   * A visitor class for extracting object/offset pairs.
+   *
+   * This visitor walks the fields of an object and extracts object/offset pairs
+   * that are later translated to image offsets.  This visitor is only
+   * responsible for extracting info for Java references.  Native references to
+   * Java strings are handled in the wrapper function
+   * CollectStringReferenceInfo().
+   */
+  class CollectStringReferenceVisitor;
+
+  // A visitor used by the VerifyNativeGCRootInvariants() function.
+  class NativeGCRootInvariantVisitor;
 
   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
 };
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index a5831b6..2610561 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -96,12 +96,12 @@
 
     void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
-                                     uint32_t patch_offset ATTRIBUTE_UNUSED) {
+                                     uint32_t patch_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNIMPLEMENTED";
     }
 
     std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
-        uint32_t executable_offset ATTRIBUTE_UNUSED) {
+        uint32_t executable_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNIMPLEMENTED";
       UNREACHABLE();
     }
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index f88d8d4..be9a0cb 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -26,6 +26,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/enums.h"
 #include "base/file_magic.h"
+#include "base/file_utils.h"
 #include "base/indenter.h"
 #include "base/logging.h"  // For VLOG
 #include "base/os.h"
@@ -92,19 +93,34 @@
 
 static constexpr bool kOatWriterDebugOatCodeLayout = false;
 
-typedef DexFile::Header __attribute__((aligned(1))) UnalignedDexFileHeader;
+using UnalignedDexFileHeader __attribute__((__aligned__(1))) = DexFile::Header;
 
 const UnalignedDexFileHeader* AsUnalignedDexFileHeader(const uint8_t* raw_data) {
-    return reinterpret_cast<const UnalignedDexFileHeader*>(raw_data);
+  return reinterpret_cast<const UnalignedDexFileHeader*>(raw_data);
 }
 
-class ChecksumUpdatingOutputStream : public OutputStream {
+inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& compiled_method) {
+  // We want to align the code rather than the preheader.
+  uint32_t unaligned_code_offset = header_offset + sizeof(OatQuickMethodHeader);
+  uint32_t aligned_code_offset =  compiled_method.AlignCode(unaligned_code_offset);
+  return aligned_code_offset - unaligned_code_offset;
+}
+
+}  // anonymous namespace
+
+class OatWriter::ChecksumUpdatingOutputStream : public OutputStream {
  public:
-  ChecksumUpdatingOutputStream(OutputStream* out, OatHeader* oat_header)
-      : OutputStream(out->GetLocation()), out_(out), oat_header_(oat_header) { }
+  ChecksumUpdatingOutputStream(OutputStream* out, OatWriter* writer)
+      : OutputStream(out->GetLocation()), out_(out), writer_(writer) { }
 
   bool WriteFully(const void* buffer, size_t byte_count) override {
-    oat_header_->UpdateChecksum(buffer, byte_count);
+    if (buffer != nullptr) {
+      const uint8_t* bytes = reinterpret_cast<const uint8_t*>(buffer);
+      uint32_t old_checksum = writer_->oat_checksum_;
+      writer_->oat_checksum_ = adler32(old_checksum, bytes, byte_count);
+    } else {
+      DCHECK_EQ(0U, byte_count);
+    }
     return out_->WriteFully(buffer, byte_count);
   }
 
@@ -118,18 +134,9 @@
 
  private:
   OutputStream* const out_;
-  OatHeader* const oat_header_;
+  OatWriter* const writer_;
 };
 
-inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& compiled_method) {
-  // We want to align the code rather than the preheader.
-  uint32_t unaligned_code_offset = header_offset + sizeof(OatQuickMethodHeader);
-  uint32_t aligned_code_offset =  compiled_method.AlignCode(unaligned_code_offset);
-  return aligned_code_offset - unaligned_code_offset;
-}
-
-}  // anonymous namespace
-
 // Defines the location of the raw dex file to write.
 class OatWriter::DexFileSource {
  public:
@@ -378,6 +385,7 @@
     vdex_dex_shared_data_offset_(0u),
     vdex_verifier_deps_offset_(0u),
     vdex_quickening_info_offset_(0u),
+    oat_checksum_(adler32(0L, Z_NULL, 0)),
     code_size_(0u),
     oat_size_(0u),
     data_bimg_rel_ro_start_(0u),
@@ -674,7 +682,7 @@
   oat_size_ = InitOatHeader(dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
                             key_value_store);
 
-  ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, oat_header_.get());
+  ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, this);
 
   std::unique_ptr<BufferedOutputStream> vdex_out =
       std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
@@ -765,10 +773,6 @@
   bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kPageSize) : 0u;
 
   CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
-  if (GetCompilerOptions().IsBootImage()) {
-    CHECK_EQ(image_writer_ != nullptr,
-             oat_header_->GetStoreValueByKey(OatHeader::kImageLocationKey) == nullptr);
-  }
 
   write_state_ = WriteState::kWriteRoData;
 }
@@ -964,7 +968,7 @@
     ClassStatus status;
     bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
     if (!found) {
-      VerificationResults* results = writer_->compiler_driver_->GetVerificationResults();
+      const VerificationResults* results = writer_->compiler_options_.GetVerificationResults();
       if (results != nullptr && results->IsClassRejected(class_ref)) {
         // The oat class status is used only for verification of resolved classes,
         // so use ClassStatus::kErrorResolved whether the class was resolved or unresolved
@@ -1007,7 +1011,7 @@
 
   size_t class_def_index;
   uint32_t access_flags;
-  const DexFile::CodeItem* code_item;
+  const dex::CodeItem* code_item;
 
   // A value of -1 denotes missing debug info
   static constexpr size_t kDebugInfoIdxInvalid = static_cast<size_t>(-1);
@@ -1485,7 +1489,7 @@
                          const std::vector<const DexFile*>* dex_files)
       : OatDexMethodVisitor(writer, offset),
         pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
-        class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
+        class_loader_(writer->HasImage() ? writer->image_writer_->GetAppClassLoader() : nullptr),
         dex_files_(dex_files),
         class_linker_(Runtime::Current()->GetClassLinker()) {}
 
@@ -1502,8 +1506,9 @@
       return true;
     }
     ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), *dex_file);
-    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
-    mirror::Class* klass = dex_cache->GetResolvedType(class_def.class_idx_);
+    const dex::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+    ObjPtr<mirror::Class> klass =
+        class_linker_->LookupResolvedType(class_def.class_idx_, dex_cache, class_loader_);
     if (klass != nullptr) {
       for (ArtMethod& method : klass->GetCopiedMethods(pointer_size_)) {
         // Find origin method. Declaring class and dex_method_idx
@@ -1553,24 +1558,11 @@
     ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
     ArtMethod* resolved_method;
     if (writer_->GetCompilerOptions().IsBootImage()) {
-      const InvokeType invoke_type = method.GetInvokeType(
-          dex_file_->GetClassDef(class_def_index_).access_flags_);
-      // Unchecked as we hold mutator_lock_ on entry.
-      ScopedObjectAccessUnchecked soa(self);
-      StackHandleScope<1> hs(self);
-      resolved_method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-          method.GetIndex(),
-          hs.NewHandle(dex_cache),
-          ScopedNullHandle<mirror::ClassLoader>(),
-          /* referrer */ nullptr,
-          invoke_type);
+      resolved_method = class_linker_->LookupResolvedMethod(
+          method.GetIndex(), dex_cache, /*class_loader=*/ nullptr);
       if (resolved_method == nullptr) {
-        LOG(FATAL_WITHOUT_ABORT) << "Unexpected failure to resolve a method: "
+        LOG(FATAL) << "Unexpected failure to look up a method: "
             << dex_file_->PrettyMethod(method.GetIndex(), true);
-        self->AssertPendingException();
-        mirror::Throwable* exc = self->GetException();
-        std::string dump = exc->Dump();
-        LOG(FATAL) << dump;
         UNREACHABLE();
       }
     } else {
@@ -1593,7 +1585,7 @@
 
   // Check whether current class is image class
   bool IsImageClass() {
-    const DexFile::TypeId& type_id =
+    const dex::TypeId& type_id =
         dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
     const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
     return writer_->GetCompilerOptions().IsImageClass(class_descriptor);
@@ -1638,7 +1630,7 @@
         offset_(relative_offset),
         dex_file_(nullptr),
         pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
-        class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
+        class_loader_(writer->HasImage() ? writer->image_writer_->GetAppClassLoader() : nullptr),
         out_(out),
         file_offset_(file_offset),
         class_linker_(Runtime::Current()->GetClassLinker()),
@@ -1671,7 +1663,7 @@
     }
   }
 
-  virtual bool VisitComplete() {
+  bool VisitComplete() override {
     offset_ = writer_->relative_patcher_->WriteThunks(out_, offset_);
     if (UNLIKELY(offset_ == 0u)) {
       PLOG(ERROR) << "Failed to write final relative call thunks";
@@ -2279,6 +2271,7 @@
   }
 
   if (HasImage()) {
+    ScopedAssertNoThreadSuspension sants("Init image method visitor", Thread::Current());
     InitImageMethodVisitor image_visitor(this, offset, dex_files_);
     success = VisitDexMethods(&image_visitor);
     image_visitor.Postprocess();
@@ -2364,7 +2357,7 @@
   size_t relative_offset = current_offset - file_offset;
 
   // Wrap out to update checksum with each write.
-  ChecksumUpdatingOutputStream checksum_updating_out(out, oat_header_.get());
+  ChecksumUpdatingOutputStream checksum_updating_out(out, this);
   out = &checksum_updating_out;
 
   relative_offset = WriteClassOffsets(out, file_offset, relative_offset);
@@ -2405,7 +2398,7 @@
   if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
     PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
                 << " Expected: " << expected_file_offset << " File: " << out->GetLocation();
-    return 0;
+    return false;
   }
   DCHECK_OFFSET();
 
@@ -2673,7 +2666,7 @@
   CHECK(write_state_ == WriteState::kWriteText);
 
   // Wrap out to update checksum with each write.
-  ChecksumUpdatingOutputStream checksum_updating_out(out, oat_header_.get());
+  ChecksumUpdatingOutputStream checksum_updating_out(out, this);
   out = &checksum_updating_out;
 
   SetMultiOatRelativePatcherAdjustment();
@@ -2709,7 +2702,7 @@
   CHECK(write_state_ == WriteState::kWriteDataBimgRelRo);
 
   // Wrap out to update checksum with each write.
-  ChecksumUpdatingOutputStream checksum_updating_out(out, oat_header_.get());
+  ChecksumUpdatingOutputStream checksum_updating_out(out, this);
   out = &checksum_updating_out;
 
   const size_t file_offset = oat_data_offset_;
@@ -2815,11 +2808,16 @@
   return true;
 }
 
-bool OatWriter::WriteHeader(OutputStream* out, uint32_t image_file_location_oat_checksum) {
+bool OatWriter::WriteHeader(OutputStream* out) {
   CHECK(write_state_ == WriteState::kWriteHeader);
 
-  oat_header_->SetImageFileLocationOatChecksum(image_file_location_oat_checksum);
-  oat_header_->UpdateChecksumWithHeaderData();
+  // Update checksum with header data.
+  DCHECK_EQ(oat_header_->GetChecksum(), 0u);  // For checksum calculation.
+  const uint8_t* header_begin = reinterpret_cast<const uint8_t*>(oat_header_.get());
+  const uint8_t* header_end = oat_header_->GetKeyValueStore() + oat_header_->GetKeyValueStoreSize();
+  uint32_t old_checksum = oat_checksum_;
+  oat_checksum_ = adler32(old_checksum, header_begin, header_end - header_begin);
+  oat_header_->SetChecksum(oat_checksum_);
 
   const size_t file_offset = oat_data_offset_;
 
@@ -3400,11 +3398,6 @@
 }
 
 bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_file) {
-  // Open dex files and write them into `out`.
-  // Note that we only verify dex files which do not belong to the boot class path.
-  // This is because those have been processed by `hiddenapi` and would not pass
-  // some of the checks. No guarantees are lost, however, as `hiddenapi` verifies
-  // the dex files prior to processing.
   TimingLogger::ScopedTiming split("Dex Layout", timings_);
   std::string error_msg;
   std::string location(oat_dex_file->GetLocation());
@@ -3425,19 +3418,19 @@
     dex_file = dex_file_loader.Open(location,
                                     zip_entry->GetCrc32(),
                                     std::move(mem_map),
-                                    /* verify */ !GetCompilerOptions().IsBootImage(),
+                                    /* verify */ true,
                                     /* verify_checksum */ true,
                                     &error_msg);
   } else if (oat_dex_file->source_.IsRawFile()) {
     File* raw_file = oat_dex_file->source_.GetRawFile();
-    int dup_fd = dup(raw_file->Fd());
+    int dup_fd = DupCloexec(raw_file->Fd());
     if (dup_fd < 0) {
       PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
       return false;
     }
     TimingLogger::ScopedTiming extract("Open", timings_);
     dex_file = dex_file_loader.OpenDex(dup_fd, location,
-                                       /* verify */ !GetCompilerOptions().IsBootImage(),
+                                       /* verify */ true,
                                        /* verify_checksum */ true,
                                        /* mmap_shared */ false,
                                        &error_msg);
@@ -3687,7 +3680,7 @@
     for (OatDexFile& oat_dex_file : oat_dex_files_) {
       std::string error_msg;
       maps.emplace_back(oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
-          oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg));
+          oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg, alignof(DexFile)));
       MemMap* map = &maps.back();
       if (!map->IsValid()) {
         LOG(ERROR) << error_msg;
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index c049518..48215bb 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -19,6 +19,7 @@
 
 #include <stdint.h>
 #include <cstddef>
+#include <list>
 #include <memory>
 #include <vector>
 
@@ -198,7 +199,7 @@
   // Check the size of the written oat file.
   bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
   // Write the oat header. This finalizes the oat file.
-  bool WriteHeader(OutputStream* out, uint32_t image_file_location_oat_checksum);
+  bool WriteHeader(OutputStream* out);
 
   // Returns whether the oat file has an associated image.
   bool HasImage() const {
@@ -256,6 +257,7 @@
   }
 
  private:
+  class ChecksumUpdatingOutputStream;
   class DexFileSource;
   class OatClassHeader;
   class OatClass;
@@ -400,6 +402,9 @@
   // Offset of section holding quickening info inside Vdex.
   size_t vdex_quickening_info_offset_;
 
+  // OAT checksum.
+  uint32_t oat_checksum_;
+
   // Size of the .text segment.
   size_t code_size_;
 
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index f764b42..ecf9db8 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -19,6 +19,7 @@
 #include "arch/instruction_set_features.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/file_utils.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
@@ -87,7 +88,7 @@
   void SetupCompiler(const std::vector<std::string>& compiler_options) {
     std::string error_msg;
     if (!compiler_options_->ParseCompilerOptions(compiler_options,
-                                                 false /* ignore_unrecognized */,
+                                                 /*ignore_unrecognized=*/ false,
                                                  &error_msg)) {
       LOG(FATAL) << error_msg;
       UNREACHABLE();
@@ -118,7 +119,8 @@
         return false;
       }
     }
-    return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify);
+    return DoWriteElf(
+        vdex_file, oat_file, oat_writer, key_value_store, verify, CopyOption::kOnlyIfCompressed);
   }
 
   bool WriteElf(File* vdex_file,
@@ -126,6 +128,7 @@
                 const std::vector<const char*>& dex_filenames,
                 SafeMap<std::string, std::string>& key_value_store,
                 bool verify,
+                CopyOption copy,
                 ProfileCompilationInfo* profile_compilation_info) {
     TimingLogger timings("WriteElf", false, false);
     ClearBootImageOption();
@@ -138,7 +141,7 @@
         return false;
       }
     }
-    return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify);
+    return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify, copy);
   }
 
   bool WriteElf(File* vdex_file,
@@ -146,7 +149,8 @@
                 File&& zip_fd,
                 const char* location,
                 SafeMap<std::string, std::string>& key_value_store,
-                bool verify) {
+                bool verify,
+                CopyOption copy) {
     TimingLogger timings("WriteElf", false, false);
     ClearBootImageOption();
     OatWriter oat_writer(*compiler_options_,
@@ -156,14 +160,15 @@
     if (!oat_writer.AddZippedDexFilesSource(std::move(zip_fd), location)) {
       return false;
     }
-    return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify);
+    return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify, copy);
   }
 
   bool DoWriteElf(File* vdex_file,
                   File* oat_file,
                   OatWriter& oat_writer,
                   SafeMap<std::string, std::string>& key_value_store,
-                  bool verify) {
+                  bool verify,
+                  CopyOption copy) {
     std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick(
         compiler_driver_->GetCompilerOptions(),
         oat_file);
@@ -176,8 +181,8 @@
         oat_rodata,
         &key_value_store,
         verify,
-        /* update_input_vdex */ false,
-        CopyOption::kOnlyIfCompressed,
+        /*update_input_vdex=*/ false,
+        copy,
         &opened_dex_files_maps,
         &opened_dex_files)) {
       return false;
@@ -235,8 +240,7 @@
       elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
     }
 
-    if (!oat_writer.WriteHeader(elf_writer->GetStream(),
-                                /* image_file_location_oat_checksum */ 42U)) {
+    if (!oat_writer.WriteHeader(elf_writer->GetStream())) {
       return false;
     }
 
@@ -257,7 +261,7 @@
   }
 
   void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
-  void TestZipFileInput(bool verify);
+  void TestZipFileInput(bool verify, CopyOption copy);
   void TestZipFileInputWithEmptyDex();
 
   std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -387,13 +391,12 @@
   jobject class_loader = nullptr;
   if (kCompile) {
     TimingLogger timings2("OatTest::WriteRead", false, false);
-    SetDexFilesForOatFile(class_linker->GetBootClassPath());
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
+    CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
   }
 
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
-  key_value_store.Put(OatHeader::kImageLocationKey, "lue.art");
+  key_value_store.Put(OatHeader::kBootClassPathChecksumsKey, "testkey");
   bool success = WriteElf(tmp_vdex.GetFile(),
                           tmp_oat.GetFile(),
                           class_linker->GetBootClassPath(),
@@ -402,23 +405,22 @@
   ASSERT_TRUE(success);
 
   if (kCompile) {  // OatWriter strips the code, regenerate to compare
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+    CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
   }
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   tmp_oat.GetFilename(),
                                                   tmp_oat.GetFilename(),
-                                                  /* requested_base */ nullptr,
-                                                  /* executable */ false,
-                                                  /* low_4gb */ true,
-                                                  /* abs_dex_location */ nullptr,
-                                                  /* reservation */ nullptr,
+                                                  /*executable=*/ false,
+                                                  /*low_4gb=*/ true,
+                                                  /*abs_dex_location=*/ nullptr,
+                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
   const OatHeader& oat_header = oat_file->GetOatHeader();
   ASSERT_TRUE(oat_header.IsValid());
   ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount());  // core
-  ASSERT_EQ(42U, oat_header.GetImageFileLocationOatChecksum());
-  ASSERT_EQ("lue.art", std::string(oat_header.GetStoreValueByKey(OatHeader::kImageLocationKey)));
+  ASSERT_TRUE(oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey) != nullptr);
+  ASSERT_STREQ("testkey", oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey));
 
   ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   const DexFile& dex_file = *java_lang_dex_file_;
@@ -464,7 +466,7 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(68U, sizeof(OatHeader));
+  EXPECT_EQ(64U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
   EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
   EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
@@ -512,28 +514,25 @@
     ScopedObjectAccess soa(Thread::Current());
     class_linker->RegisterDexFile(*dex_file, soa.Decode<mirror::ClassLoader>(class_loader));
   }
-  SetDexFilesForOatFile(dex_files);
-  compiler_driver_->CompileAll(class_loader, dex_files, &timings);
+  CompileAll(class_loader, dex_files, &timings);
 
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
-  key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
   bool success = WriteElf(tmp_vdex.GetFile(),
                           tmp_oat.GetFile(),
                           dex_files,
                           key_value_store,
-                          /* verify */ false);
+                          /*verify=*/ false);
   ASSERT_TRUE(success);
 
   std::string error_msg;
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   tmp_oat.GetFilename(),
                                                   tmp_oat.GetFilename(),
-                                                  /* requested_base */ nullptr,
-                                                  /* executable */ false,
-                                                  /* low_4gb */ false,
-                                                  /* abs_dex_location */ nullptr,
-                                                  /* reservation */ nullptr,
+                                                  /*executable=*/ false,
+                                                  /*low_4gb=*/ false,
+                                                  /*abs_dex_location=*/ nullptr,
+                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file != nullptr);
   EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -585,7 +584,6 @@
 
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
-  key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
   std::unique_ptr<ProfileCompilationInfo>
       profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
   success = WriteElf(tmp_vdex.GetFile(),
@@ -593,6 +591,7 @@
                      input_filenames,
                      key_value_store,
                      verify,
+                     CopyOption::kOnlyIfCompressed,
                      profile_compilation_info.get());
 
   // In verify mode, we expect failure.
@@ -604,14 +603,13 @@
   ASSERT_TRUE(success);
 
   std::string error_msg;
-  std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                          tmp_oat.GetFilename(),
                                                          tmp_oat.GetFilename(),
-                                                         /* requested_base */ nullptr,
-                                                         /* executable */ false,
+                                                         /*executable=*/ false,
                                                          low_4gb,
-                                                         /* abs_dex_location */ nullptr,
-                                                         /* reservation */ nullptr,
+                                                         /*abs_dex_location=*/ nullptr,
+                                                         /*reservation=*/ nullptr,
                                                          &error_msg));
   ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
   if (low_4gb) {
@@ -670,7 +668,7 @@
   TestDexFileInput(/*verify*/true, /*low_4gb*/false, /*use_profile*/true);
 }
 
-void OatTest::TestZipFileInput(bool verify) {
+void OatTest::TestZipFileInput(bool verify, CopyOption copy) {
   TimingLogger timings("OatTest::DexFileInput", false, false);
 
   ScratchFile zip_file;
@@ -716,7 +714,6 @@
   ASSERT_TRUE(success) << strerror(errno);
 
   SafeMap<std::string, std::string> key_value_store;
-  key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
   {
     // Test using the AddDexFileSource() interface with the zip file.
     std::vector<const char*> input_filenames = { zip_file.GetFilename().c_str() };
@@ -727,7 +724,8 @@
                        input_filenames,
                        key_value_store,
                        verify,
-                       /* profile_compilation_info */ nullptr);
+                       copy,
+                       /*profile_compilation_info=*/ nullptr);
 
     if (verify) {
       ASSERT_FALSE(success);
@@ -735,14 +733,13 @@
       ASSERT_TRUE(success);
 
       std::string error_msg;
-      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                              tmp_oat.GetFilename(),
                                                              tmp_oat.GetFilename(),
-                                                             /* requested_base */ nullptr,
-                                                             /* executable */ false,
-                                                             /* low_4gb */ false,
-                                                             /* abs_dex_location */ nullptr,
-                                                             /* reservation */ nullptr,
+                                                             /*executable=*/ false,
+                                                             /*low_4gb=*/ false,
+                                                             /*abs_dex_location=*/ nullptr,
+                                                             /*reservation=*/ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -769,7 +766,7 @@
 
   {
     // Test using the AddZipDexFileSource() interface with the zip file handle.
-    File zip_fd(dup(zip_file.GetFd()), /* check_usage */ false);
+    File zip_fd(DupCloexec(zip_file.GetFd()), /*check_usage=*/ false);
     ASSERT_NE(-1, zip_fd.Fd());
 
     ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
@@ -778,21 +775,21 @@
                        std::move(zip_fd),
                        zip_file.GetFilename().c_str(),
                        key_value_store,
-                       verify);
+                       verify,
+                       copy);
     if (verify) {
       ASSERT_FALSE(success);
     } else {
       ASSERT_TRUE(success);
 
       std::string error_msg;
-      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                              tmp_oat.GetFilename(),
                                                              tmp_oat.GetFilename(),
-                                                             /* requested_base */ nullptr,
-                                                             /* executable */ false,
-                                                             /* low_4gb */ false,
-                                                             /* abs_dex_location */ nullptr,
-                                                             /* reservation */ nullptr,
+                                                             /*executable=*/ false,
+                                                             /*low_4gb=*/ false,
+                                                             /*abs_dex_location=*/ nullptr,
+                                                             /*reservation=*/ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -819,11 +816,15 @@
 }
 
 TEST_F(OatTest, ZipFileInputCheckOutput) {
-  TestZipFileInput(false);
+  TestZipFileInput(false, CopyOption::kOnlyIfCompressed);
+}
+
+TEST_F(OatTest, ZipFileInputCheckOutputWithoutCopy) {
+  TestZipFileInput(false, CopyOption::kNever);
 }
 
 TEST_F(OatTest, ZipFileInputCheckVerifier) {
-  TestZipFileInput(true);
+  TestZipFileInput(true, CopyOption::kOnlyIfCompressed);
 }
 
 void OatTest::TestZipFileInputWithEmptyDex() {
@@ -835,7 +836,6 @@
   ASSERT_TRUE(success) << strerror(errno);
 
   SafeMap<std::string, std::string> key_value_store;
-  key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
   std::vector<const char*> input_filenames = { zip_file.GetFilename().c_str() };
   ScratchFile oat_file, vdex_file(oat_file, ".vdex");
   std::unique_ptr<ProfileCompilationInfo> profile_compilation_info(new ProfileCompilationInfo());
@@ -843,7 +843,8 @@
                      oat_file.GetFile(),
                      input_filenames,
                      key_value_store,
-                     /* verify */ false,
+                     /*verify=*/ false,
+                     CopyOption::kOnlyIfCompressed,
                      profile_compilation_info.get());
   ASSERT_FALSE(success);
 }
@@ -852,29 +853,5 @@
   TestZipFileInputWithEmptyDex();
 }
 
-TEST_F(OatTest, UpdateChecksum) {
-  InstructionSet insn_set = InstructionSet::kX86;
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> insn_features(
-    InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
-  ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
-  std::unique_ptr<OatHeader> oat_header(OatHeader::Create(insn_set,
-                                                          insn_features.get(),
-                                                          0u,
-                                                          nullptr));
-  // The starting adler32 value is 1.
-  EXPECT_EQ(1U, oat_header->GetChecksum());
-
-  oat_header->UpdateChecksum(OatHeader::kOatMagic, sizeof(OatHeader::kOatMagic));
-  EXPECT_EQ(64291151U, oat_header->GetChecksum());
-
-  // Make sure that null data does not reset the checksum.
-  oat_header->UpdateChecksum(nullptr, 0);
-  EXPECT_EQ(64291151U, oat_header->GetChecksum());
-
-  oat_header->UpdateChecksum(OatHeader::kOatMagic, sizeof(OatHeader::kOatMagic));
-  EXPECT_EQ(216138397U, oat_header->GetChecksum());
-}
-
 }  // namespace linker
 }  // namespace art
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index 564cf30..45a4a22 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -79,7 +79,7 @@
 
     void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
-                                     uint32_t patch_offset ATTRIBUTE_UNUSED) {
+                                     uint32_t patch_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "Unexpected baker read barrier branch patch.";
     }
 
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 9725570..4329ee1 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -17,19 +17,17 @@
 #ifndef ART_DEX2OAT_LINKER_RELATIVE_PATCHER_TEST_H_
 #define ART_DEX2OAT_LINKER_RELATIVE_PATCHER_TEST_H_
 
+#include <gtest/gtest.h>
+
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
 #include "base/array_ref.h"
 #include "base/globals.h"
 #include "base/macros.h"
-#include "common_compiler_test.h"
 #include "compiled_method-inl.h"
-#include "dex/verification_results.h"
 #include "dex/method_reference.h"
 #include "dex/string_reference.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "gtest/gtest.h"
+#include "driver/compiled_method_storage.h"
 #include "linker/relative_patcher.h"
 #include "linker/vector_output_stream.h"
 #include "oat.h"
@@ -39,10 +37,12 @@
 namespace linker {
 
 // Base class providing infrastructure for architecture-specific tests.
-class RelativePatcherTest : public CommonCompilerTest {
+class RelativePatcherTest : public testing::Test {
  protected:
   RelativePatcherTest(InstructionSet instruction_set, const std::string& variant)
-      : variant_(variant),
+      : storage_(/*swap_fd=*/ -1),
+        instruction_set_(instruction_set),
+        instruction_set_features_(nullptr),
         method_offset_map_(),
         patcher_(nullptr),
         bss_begin_(0u),
@@ -50,27 +50,48 @@
         compiled_methods_(),
         patched_code_(),
         output_(),
-        out_("test output stream", &output_) {
-    // Override CommonCompilerTest's defaults.
-    instruction_set_ = instruction_set;
-    number_of_threads_ = 1u;
+        out_(nullptr) {
+    std::string error_msg;
+    instruction_set_features_ =
+        InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg);
+    CHECK(instruction_set_features_ != nullptr) << error_msg;
+
     patched_code_.reserve(16 * KB);
   }
 
   void SetUp() override {
-    OverrideInstructionSetFeatures(instruction_set_, variant_);
-    CommonCompilerTest::SetUp();
-
-    patcher_ = RelativePatcher::Create(compiler_options_->GetInstructionSet(),
-                                       compiler_options_->GetInstructionSetFeatures(),
-                                       &thunk_provider_,
-                                       &method_offset_map_);
+    Reset();
   }
 
   void TearDown() override {
+    thunk_provider_.Reset();
     compiled_methods_.clear();
     patcher_.reset();
-    CommonCompilerTest::TearDown();
+    bss_begin_ = 0u;
+    string_index_to_offset_map_.clear();
+    compiled_method_refs_.clear();
+    compiled_methods_.clear();
+    patched_code_.clear();
+    output_.clear();
+    out_.reset();
+  }
+
+  // Reset the helper to start another test. Creating and tearing down the Runtime is expensive,
+  // so we merge related tests together.
+  void Reset() {
+    thunk_provider_.Reset();
+    method_offset_map_.map.clear();
+    patcher_ = RelativePatcher::Create(instruction_set_,
+                                       instruction_set_features_.get(),
+                                       &thunk_provider_,
+                                       &method_offset_map_);
+    bss_begin_ = 0u;
+    string_index_to_offset_map_.clear();
+    compiled_method_refs_.clear();
+    compiled_methods_.clear();
+    patched_code_.clear();
+    output_.clear();
+    out_.reset(new VectorOutputStream("test output stream", &output_));
   }
 
   MethodReference MethodRef(uint32_t method_idx) {
@@ -84,7 +105,7 @@
       const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) {
     compiled_method_refs_.push_back(method_ref);
     compiled_methods_.emplace_back(new CompiledMethod(
-        compiler_driver_.get(),
+        &storage_,
         instruction_set_,
         code,
         /* vmap_table */ ArrayRef<const uint8_t>(),
@@ -127,7 +148,7 @@
     DCHECK(output_.empty());
     uint8_t dummy_trampoline[kTrampolineSize];
     memset(dummy_trampoline, 0, sizeof(dummy_trampoline));
-    out_.WriteFully(dummy_trampoline, kTrampolineSize);
+    out_->WriteFully(dummy_trampoline, kTrampolineSize);
     offset = kTrampolineSize;
     static const uint8_t kPadding[] = {
         0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u
@@ -135,14 +156,14 @@
     uint8_t dummy_header[sizeof(OatQuickMethodHeader)];
     memset(dummy_header, 0, sizeof(dummy_header));
     for (auto& compiled_method : compiled_methods_) {
-      offset = patcher_->WriteThunks(&out_, offset);
+      offset = patcher_->WriteThunks(out_.get(), offset);
 
       uint32_t alignment_size = CodeAlignmentSize(offset);
       CHECK_LE(alignment_size, sizeof(kPadding));
-      out_.WriteFully(kPadding, alignment_size);
+      out_->WriteFully(kPadding, alignment_size);
       offset += alignment_size;
 
-      out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
+      out_->WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
       offset += sizeof(OatQuickMethodHeader);
       ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
       if (!compiled_method->GetPatches().empty()) {
@@ -179,10 +200,10 @@
           }
         }
       }
-      out_.WriteFully(&code[0], code.size());
+      out_->WriteFully(&code[0], code.size());
       offset += code.size();
     }
-    offset = patcher_->WriteThunks(&out_, offset);
+    offset = patcher_->WriteThunks(out_.get(), offset);
     CHECK_EQ(offset, output_size);
     CHECK_EQ(output_.size(), output_size);
   }
@@ -270,6 +291,10 @@
       *debug_name = value.GetDebugName();
     }
 
+    void Reset() {
+      thunk_map_.clear();
+    }
+
    private:
     class ThunkKey {
      public:
@@ -332,7 +357,10 @@
   static const uint32_t kTrampolineSize = 4u;
   static const uint32_t kTrampolineOffset = 0u;
 
-  std::string variant_;
+  CompiledMethodStorage storage_;
+  InstructionSet instruction_set_;
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
   ThunkProvider thunk_provider_;
   MethodOffsetMap method_offset_map_;
   std::unique_ptr<RelativePatcher> patcher_;
@@ -342,7 +370,7 @@
   std::vector<std::unique_ptr<CompiledMethod>> compiled_methods_;
   std::vector<uint8_t> patched_code_;
   std::vector<uint8_t> output_;
-  VectorOutputStream out_;
+  std::unique_ptr<VectorOutputStream> out_;
 };
 
 }  // namespace linker
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.cc b/dex2oat/linker/x86/relative_patcher_x86_base.cc
index 6a9690d..07cd724 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.cc
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.cc
@@ -50,7 +50,7 @@
   uint32_t displacement = target_offset - patch_offset;
   displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
 
-  typedef __attribute__((__aligned__(1))) int32_t unaligned_int32_t;
+  using unaligned_int32_t __attribute__((__aligned__(1))) = int32_t;
   reinterpret_cast<unaligned_int32_t*>(&(*code)[literal_offset])[0] = displacement;
 }
 
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
index 9633564..c80f6a9 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
@@ -31,7 +31,7 @@
   uint32_t displacement = target_offset - patch_offset;
   displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
 
-  typedef __attribute__((__aligned__(1))) int32_t unaligned_int32_t;
+  using unaligned_int32_t __attribute__((__aligned__(1))) = int32_t;
   reinterpret_cast<unaligned_int32_t*>(&(*code)[patch.LiteralOffset()])[0] = displacement;
 }
 
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index ac9a9a2..434cb35 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -17,12 +17,12 @@
 
 cc_defaults {
     name: "dexdump_defaults",
+    defaults: ["art_defaults"],
     srcs: [
         "dexdump_cfg.cc",
         "dexdump_main.cc",
         "dexdump.cc",
     ],
-    cflags: ["-Wall", "-Werror"],
 }
 
 art_cc_binary {
@@ -38,17 +38,20 @@
 
 art_cc_binary {
     name: "dexdumps",
-    defaults: ["dexdump_defaults"],
+    defaults: [
+        "dexdump_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+    ],
     host_supported: true,
     device_supported: false,
-    static_libs: [
-        "libdexfile",
-        "libartbase",
-    ] + art_static_dependencies,
     target: {
         darwin: {
             enabled: false,
         },
+        windows: {
+            enabled: true,
+        },
     },
 }
 
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index e9b6402..a412938 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -69,14 +69,14 @@
 /*
  * Data types that match the definitions in the VM specification.
  */
-typedef uint8_t  u1;
-typedef uint16_t u2;
-typedef uint32_t u4;
-typedef uint64_t u8;
-typedef int8_t   s1;
-typedef int16_t  s2;
-typedef int32_t  s4;
-typedef int64_t  s8;
+using u1 = uint8_t;
+using u2 = uint16_t;
+using u4 = uint32_t;
+using u8 = uint64_t;
+using s1 = int8_t;
+using s2 = int16_t;
+using s4 = int32_t;
+using s8 = int64_t;
 
 /*
  * Basic information about a field or a method.
@@ -331,7 +331,7 @@
  * NULL-terminated.
  */
 static void asciify(char* out, const unsigned char* data, size_t len) {
-  while (len--) {
+  for (; len != 0u; --len) {
     if (*data < 0x20) {
       // Could do more here, but we don't need them yet.
       switch (*data) {
@@ -492,13 +492,13 @@
     case DexFile::kDexAnnotationField:
     case DexFile::kDexAnnotationEnum: {
       const u4 field_idx = static_cast<u4>(readVarWidth(data, arg, false));
-      const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
+      const dex::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
       fputs(pDexFile->StringDataByIdx(pFieldId.name_idx_), gOutFile);
       break;
     }
     case DexFile::kDexAnnotationMethod: {
       const u4 method_idx = static_cast<u4>(readVarWidth(data, arg, false));
-      const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+      const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
       fputs(pDexFile->StringDataByIdx(pMethodId.name_idx_), gOutFile);
       break;
     }
@@ -594,7 +594,7 @@
  */
 static void dumpClassDef(const DexFile* pDexFile, int idx) {
   // General class information.
-  const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+  const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
   fprintf(gOutFile, "Class #%d header:\n", idx);
   fprintf(gOutFile, "class_idx           : %d\n", pClassDef.class_idx_.index_);
   fprintf(gOutFile, "access_flags        : %d (0x%04x)\n",
@@ -620,13 +620,13 @@
 /**
  * Dumps an annotation set item.
  */
-static void dumpAnnotationSetItem(const DexFile* pDexFile, const DexFile::AnnotationSetItem* set_item) {
+static void dumpAnnotationSetItem(const DexFile* pDexFile, const dex::AnnotationSetItem* set_item) {
   if (set_item == nullptr || set_item->size_ == 0) {
     fputs("  empty-annotation-set\n", gOutFile);
     return;
   }
   for (u4 i = 0; i < set_item->size_; i++) {
-    const DexFile::AnnotationItem* annotation = pDexFile->GetAnnotationItem(set_item, i);
+    const dex::AnnotationItem* annotation = pDexFile->GetAnnotationItem(set_item, i);
     if (annotation == nullptr) {
       continue;
     }
@@ -648,18 +648,18 @@
  * Dumps class annotations.
  */
 static void dumpClassAnnotations(const DexFile* pDexFile, int idx) {
-  const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
-  const DexFile::AnnotationsDirectoryItem* dir = pDexFile->GetAnnotationsDirectory(pClassDef);
+  const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+  const dex::AnnotationsDirectoryItem* dir = pDexFile->GetAnnotationsDirectory(pClassDef);
   if (dir == nullptr) {
     return;  // none
   }
 
   fprintf(gOutFile, "Class #%d annotations:\n", idx);
 
-  const DexFile::AnnotationSetItem* class_set_item = pDexFile->GetClassAnnotationSet(dir);
-  const DexFile::FieldAnnotationsItem* fields = pDexFile->GetFieldAnnotations(dir);
-  const DexFile::MethodAnnotationsItem* methods = pDexFile->GetMethodAnnotations(dir);
-  const DexFile::ParameterAnnotationsItem* pars = pDexFile->GetParameterAnnotations(dir);
+  const dex::AnnotationSetItem* class_set_item = pDexFile->GetClassAnnotationSet(dir);
+  const dex::FieldAnnotationsItem* fields = pDexFile->GetFieldAnnotations(dir);
+  const dex::MethodAnnotationsItem* methods = pDexFile->GetMethodAnnotations(dir);
+  const dex::ParameterAnnotationsItem* pars = pDexFile->GetParameterAnnotations(dir);
 
   // Annotations on the class itself.
   if (class_set_item != nullptr) {
@@ -671,7 +671,7 @@
   if (fields != nullptr) {
     for (u4 i = 0; i < dir->fields_size_; i++) {
       const u4 field_idx = fields[i].field_idx_;
-      const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
+      const dex::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
       const char* field_name = pDexFile->StringDataByIdx(pFieldId.name_idx_);
       fprintf(gOutFile, "Annotations on field #%u '%s'\n", field_idx, field_name);
       dumpAnnotationSetItem(pDexFile, pDexFile->GetFieldAnnotationSetItem(fields[i]));
@@ -682,7 +682,7 @@
   if (methods != nullptr) {
     for (u4 i = 0; i < dir->methods_size_; i++) {
       const u4 method_idx = methods[i].method_idx_;
-      const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+      const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
       const char* method_name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
       fprintf(gOutFile, "Annotations on method #%u '%s'\n", method_idx, method_name);
       dumpAnnotationSetItem(pDexFile, pDexFile->GetMethodAnnotationSetItem(methods[i]));
@@ -693,10 +693,10 @@
   if (pars != nullptr) {
     for (u4 i = 0; i < dir->parameters_size_; i++) {
       const u4 method_idx = pars[i].method_idx_;
-      const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+      const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
       const char* method_name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
       fprintf(gOutFile, "Annotations on method #%u '%s' parameters\n", method_idx, method_name);
-      const DexFile::AnnotationSetRefList*
+      const dex::AnnotationSetRefList*
           list = pDexFile->GetParameterAnnotationSetRefList(&pars[i]);
       if (list != nullptr) {
         for (u4 j = 0; j < list->size_; j++) {
@@ -713,7 +713,7 @@
 /*
  * Dumps an interface that a class declares to implement.
  */
-static void dumpInterface(const DexFile* pDexFile, const DexFile::TypeItem& pTypeItem, int i) {
+static void dumpInterface(const DexFile* pDexFile, const dex::TypeItem& pTypeItem, int i) {
   const char* interfaceName = pDexFile->StringByTypeIdx(pTypeItem.type_idx_);
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     fprintf(gOutFile, "    #%d              : '%s'\n", i, interfaceName);
@@ -726,7 +726,7 @@
 /*
  * Dumps the catches table associated with the code.
  */
-static void dumpCatches(const DexFile* pDexFile, const DexFile::CodeItem* pCode) {
+static void dumpCatches(const DexFile* pDexFile, const dex::CodeItem* pCode) {
   CodeItemDataAccessor accessor(*pDexFile, pCode);
   const u4 triesSize = accessor.TriesSize();
 
@@ -738,7 +738,7 @@
 
   // Dump all table entries.
   fprintf(gOutFile, "      catches       : %d\n", triesSize);
-  for (const DexFile::TryItem& try_item : accessor.TryItems()) {
+  for (const dex::TryItem& try_item : accessor.TryItems()) {
     const u4 start = try_item.start_addr_;
     const u4 end = start + try_item.insn_count_;
     fprintf(gOutFile, "        0x%04x - 0x%04x\n", start, end);
@@ -751,24 +751,6 @@
 }
 
 /*
- * Callback for dumping each positions table entry.
- */
-static bool dumpPositionsCb(void* /*context*/, const DexFile::PositionInfo& entry) {
-  fprintf(gOutFile, "        0x%04x line=%d\n", entry.address_, entry.line_);
-  return false;
-}
-
-/*
- * Callback for dumping locals table entry.
- */
-static void dumpLocalsCb(void* /*context*/, const DexFile::LocalInfo& entry) {
-  const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
-  fprintf(gOutFile, "        0x%04x - 0x%04x reg=%d %s %s %s\n",
-          entry.start_address_, entry.end_address_, entry.reg_,
-          entry.name_, entry.descriptor_, signature);
-}
-
-/*
  * Helper for dumpInstruction(), which builds the string
  * representation for the index in the given instruction.
  * Returns a pointer to a buffer of sufficient size.
@@ -844,7 +826,7 @@
       break;
     case Instruction::kIndexMethodRef:
       if (index < pDexFile->GetHeader().method_ids_size_) {
-        const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index);
+        const dex::MethodId& pMethodId = pDexFile->GetMethodId(index);
         const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
         const Signature signature = pDexFile->GetMethodSignature(pMethodId);
         const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -856,7 +838,7 @@
       break;
     case Instruction::kIndexFieldRef:
       if (index < pDexFile->GetHeader().field_ids_size_) {
-        const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(index);
+        const dex::FieldId& pFieldId = pDexFile->GetFieldId(index);
         const char* name = pDexFile->StringDataByIdx(pFieldId.name_idx_);
         const char* typeDescriptor = pDexFile->StringByTypeIdx(pFieldId.type_idx_);
         const char* backDescriptor = pDexFile->StringByTypeIdx(pFieldId.class_idx_);
@@ -877,7 +859,7 @@
       std::string method("<method?>");
       std::string proto("<proto?>");
       if (index < pDexFile->GetHeader().method_ids_size_) {
-        const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index);
+        const dex::MethodId& pMethodId = pDexFile->GetMethodId(index);
         const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
         const Signature signature = pDexFile->GetMethodSignature(pMethodId);
         const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -887,7 +869,7 @@
                                              signature.ToString().c_str());
       }
       if (secondary_index < pDexFile->GetHeader().proto_ids_size_) {
-        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(secondary_index));
+        const dex::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(secondary_index));
         const Signature signature = pDexFile->GetProtoSignature(protoId);
         proto = signature.ToString();
       }
@@ -905,7 +887,7 @@
       break;
     case Instruction::kIndexProtoRef:
       if (index < pDexFile->GetHeader().proto_ids_size_) {
-        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(index));
+        const dex::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(index));
         const Signature signature = pDexFile->GetProtoSignature(protoId);
         const std::string& proto = signature.ToString();
         outSize = snprintf(buf.get(), bufSize, "%s // proto@%0*x", proto.c_str(), width, index);
@@ -934,7 +916,7 @@
  * Dumps a single instruction.
  */
 static void dumpInstruction(const DexFile* pDexFile,
-                            const DexFile::CodeItem* pCode,
+                            const dex::CodeItem* pCode,
                             u4 codeOffset, u4 insnIdx, u4 insnWidth,
                             const Instruction* pDecInsn) {
   // Address of instruction (expressed as byte offset).
@@ -1147,8 +1129,8 @@
  * Dumps a bytecode disassembly.
  */
 static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
-                          const DexFile::CodeItem* pCode, u4 codeOffset) {
-  const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+                          const dex::CodeItem* pCode, u4 codeOffset) {
+  const dex::MethodId& pMethodId = pDexFile->GetMethodId(idx);
   const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
   const Signature signature = pDexFile->GetMethodSignature(pMethodId);
   const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -1181,7 +1163,7 @@
  * Dumps code of a method.
  */
 static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags,
-                     const DexFile::CodeItem* pCode, u4 codeOffset) {
+                     const dex::CodeItem* pCode, u4 codeOffset) {
   CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
 
   fprintf(gOutFile, "      registers     : %d\n", accessor.RegistersSize());
@@ -1201,9 +1183,33 @@
   // Positions and locals table in the debug info.
   bool is_static = (flags & kAccStatic) != 0;
   fprintf(gOutFile, "      positions     : \n");
-  pDexFile->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), dumpPositionsCb, nullptr);
+  accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+    fprintf(gOutFile, "        0x%04x line=%d\n", entry.address_, entry.line_);
+    return false;
+  });
   fprintf(gOutFile, "      locals        : \n");
-  accessor.DecodeDebugLocalInfo(is_static, idx, dumpLocalsCb, nullptr);
+  accessor.DecodeDebugLocalInfo(is_static,
+                                idx,
+                                [&](const DexFile::LocalInfo& entry) {
+    const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+    fprintf(gOutFile,
+            "        0x%04x - 0x%04x reg=%d %s %s %s\n",
+            entry.start_address_,
+            entry.end_address_,
+            entry.reg_,
+            entry.name_,
+            entry.descriptor_,
+            signature);
+  });
+}
+
+static std::string GetHiddenapiFlagStr(uint32_t hiddenapi_flags) {
+  std::stringstream ss;
+  hiddenapi::ApiList api_list(hiddenapi_flags);
+  api_list.Dump(ss);
+  std::string str_api_list = ss.str();
+  std::transform(str_api_list.begin(), str_api_list.end(), str_api_list.begin(), ::toupper);
+  return str_api_list;
 }
 
 /*
@@ -1211,24 +1217,31 @@
  */
 static void dumpMethod(const ClassAccessor::Method& method, int i) {
   // Bail for anything private if export only requested.
-  const uint32_t flags = method.GetRawAccessFlags();
+  const uint32_t flags = method.GetAccessFlags();
   if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) {
     return;
   }
 
   const DexFile& dex_file = method.GetDexFile();
-  const DexFile::MethodId& pMethodId = dex_file.GetMethodId(method.GetIndex());
+  const dex::MethodId& pMethodId = dex_file.GetMethodId(method.GetIndex());
   const char* name = dex_file.StringDataByIdx(pMethodId.name_idx_);
   const Signature signature = dex_file.GetMethodSignature(pMethodId);
   char* typeDescriptor = strdup(signature.ToString().c_str());
   const char* backDescriptor = dex_file.StringByTypeIdx(pMethodId.class_idx_);
   char* accessStr = createAccessFlagStr(flags, kAccessForMethod);
+  const uint32_t hiddenapiFlags = method.GetHiddenapiFlags();
 
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     fprintf(gOutFile, "    #%d              : (in %s)\n", i, backDescriptor);
     fprintf(gOutFile, "      name          : '%s'\n", name);
     fprintf(gOutFile, "      type          : '%s'\n", typeDescriptor);
     fprintf(gOutFile, "      access        : 0x%04x (%s)\n", flags, accessStr);
+    if (hiddenapiFlags != 0u) {
+      fprintf(gOutFile,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapiFlags,
+              GetHiddenapiFlagStr(hiddenapiFlags).c_str());
+    }
     if (method.GetCodeItem() == nullptr) {
       fprintf(gOutFile, "      code          : (none)\n");
     } else {
@@ -1322,23 +1335,30 @@
  */
 static void dumpField(const ClassAccessor::Field& field, int i, const u1** data = nullptr) {
   // Bail for anything private if export only requested.
-  const uint32_t flags = field.GetRawAccessFlags();
+  const uint32_t flags = field.GetAccessFlags();
   if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) {
     return;
   }
 
   const DexFile& dex_file = field.GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+  const dex::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
   const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
   const char* typeDescriptor = dex_file.StringByTypeIdx(field_id.type_idx_);
   const char* backDescriptor = dex_file.StringByTypeIdx(field_id.class_idx_);
   char* accessStr = createAccessFlagStr(flags, kAccessForField);
+  const uint32_t hiddenapiFlags = field.GetHiddenapiFlags();
 
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     fprintf(gOutFile, "    #%d              : (in %s)\n", i, backDescriptor);
     fprintf(gOutFile, "      name          : '%s'\n", name);
     fprintf(gOutFile, "      type          : '%s'\n", typeDescriptor);
     fprintf(gOutFile, "      access        : 0x%04x (%s)\n", flags, accessStr);
+    if (hiddenapiFlags != 0u) {
+      fprintf(gOutFile,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapiFlags,
+              GetHiddenapiFlagStr(hiddenapiFlags).c_str());
+    }
     if (data != nullptr) {
       fputs("      value         : ", gOutFile);
       dumpEncodedValue(&dex_file, data);
@@ -1389,7 +1409,7 @@
  * the value will be replaced with a newly-allocated string.
  */
 static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
-  const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+  const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
 
   // Omitting non-public class.
   if (gOptions.exportsOnly && (pClassDef.access_flags_ & kAccPublic) == 0) {
@@ -1483,7 +1503,7 @@
   }
 
   // Interfaces.
-  const DexFile::TypeList* pInterfaces = pDexFile->GetInterfacesList(pClassDef);
+  const dex::TypeList* pInterfaces = pDexFile->GetInterfacesList(pClassDef);
   if (pInterfaces != nullptr) {
     for (u4 i = 0; i < pInterfaces->Size(); i++) {
       dumpInterface(pDexFile, pInterfaces->GetTypeItem(i), i);
@@ -1491,7 +1511,7 @@
   }
 
   // Fields and methods.
-  ClassAccessor accessor(*pDexFile, pClassDef);
+  ClassAccessor accessor(*pDexFile, pClassDef, /* parse_hiddenapi_class_data= */ true);
 
   // Prepare data for static fields.
   const u1* sData = pDexFile->GetEncodedStaticFieldValuesArray(pClassDef);
@@ -1555,7 +1575,7 @@
 }
 
 static void dumpMethodHandle(const DexFile* pDexFile, u4 idx) {
-  const DexFile::MethodHandleItem& mh = pDexFile->GetMethodHandle(idx);
+  const dex::MethodHandleItem& mh = pDexFile->GetMethodHandle(idx);
   const char* type = nullptr;
   bool is_instance = false;
   bool is_invoke = false;
@@ -1612,12 +1632,12 @@
   std::string member_type;
   if (type != nullptr) {
     if (is_invoke) {
-      const DexFile::MethodId& method_id = pDexFile->GetMethodId(mh.field_or_method_idx_);
+      const dex::MethodId& method_id = pDexFile->GetMethodId(mh.field_or_method_idx_);
       declaring_class = pDexFile->GetMethodDeclaringClassDescriptor(method_id);
       member = pDexFile->GetMethodName(method_id);
       member_type = pDexFile->GetMethodSignature(method_id).ToString();
     } else {
-      const DexFile::FieldId& field_id = pDexFile->GetFieldId(mh.field_or_method_idx_);
+      const dex::FieldId& field_id = pDexFile->GetFieldId(mh.field_or_method_idx_);
       declaring_class = pDexFile->GetFieldDeclaringClassDescriptor(field_id);
       member = pDexFile->GetFieldName(field_id);
       member_type = pDexFile->GetFieldTypeDescriptor(field_id);
@@ -1649,7 +1669,7 @@
 }
 
 static void dumpCallSite(const DexFile* pDexFile, u4 idx) {
-  const DexFile::CallSiteIdItem& call_site_id = pDexFile->GetCallSiteId(idx);
+  const dex::CallSiteIdItem& call_site_id = pDexFile->GetCallSiteId(idx);
   CallSiteArrayValueIterator it(*pDexFile, call_site_id);
   if (it.Size() < 3) {
     LOG(ERROR) << "ERROR: Call site " << idx << " has too few values.";
@@ -1662,7 +1682,7 @@
   const char* method_name = pDexFile->StringDataByIdx(method_name_idx);
   it.Next();
   dex::ProtoIndex method_type_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
-  const DexFile::ProtoId& method_type_id = pDexFile->GetProtoId(method_type_idx);
+  const dex::ProtoId& method_type_id = pDexFile->GetProtoId(method_type_idx);
   std::string method_type = pDexFile->GetProtoSignature(method_type_id).ToString();
   it.Next();
 
@@ -1720,7 +1740,7 @@
       case EncodedArrayValueIterator::ValueType::kMethodType: {
         type = "MethodType";
         dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
-        const DexFile::ProtoId& proto_id = pDexFile->GetProtoId(proto_idx);
+        const dex::ProtoId& proto_id = pDexFile->GetProtoId(proto_idx);
         value = pDexFile->GetProtoSignature(proto_id).ToString();
         break;
       }
@@ -1737,7 +1757,7 @@
       case EncodedArrayValueIterator::ValueType::kType: {
         type = "Class";
         dex::TypeIndex type_idx = static_cast<dex::TypeIndex>(it.GetJavaValue().i);
-        const DexFile::TypeId& type_id = pDexFile->GetTypeId(type_idx);
+        const dex::TypeId& type_id = pDexFile->GetTypeId(type_idx);
         value = pDexFile->GetTypeDescriptor(type_id);
         break;
       }
@@ -1803,18 +1823,18 @@
   // Iterate over all classes.
   char* package = nullptr;
   const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
-  for (u4 i = 0; i < classDefsSize; i++) {
-    dumpClass(pDexFile, i, &package);
+  for (u4 j = 0; j < classDefsSize; j++) {
+    dumpClass(pDexFile, j, &package);
   }  // for
 
   // Iterate over all method handles.
-  for (u4 i = 0; i < pDexFile->NumMethodHandles(); ++i) {
-    dumpMethodHandle(pDexFile, i);
+  for (u4 j = 0; j < pDexFile->NumMethodHandles(); ++j) {
+    dumpMethodHandle(pDexFile, j);
   }  // for
 
   // Iterate over all call site ids.
-  for (u4 i = 0; i < pDexFile->NumCallSiteIds(); ++i) {
-    dumpCallSite(pDexFile, i);
+  for (u4 j = 0; j < pDexFile->NumCallSiteIds(); ++j) {
+    dumpCallSite(pDexFile, j);
   }  // for
 
   // Free the last package allocated.
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index f4a3866..cf0d113 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -37,7 +37,7 @@
 /*
  * Shows usage.
  */
-static void usage(void) {
+static void usage() {
   LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
   LOG(ERROR) << gProgName << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-j] [-l layout] [-o outfile]"
                   " dexfile...\n";
@@ -64,7 +64,7 @@
   gOptions.verbose = true;
 
   // Parse all arguments.
-  while (1) {
+  while (true) {
     const int ic = getopt(argc, argv, "acdefghijl:o:");
     if (ic < 0) {
       break;  // done
diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc
index 3a2d38d..bb6d4a4 100644
--- a/dexdump/dexdump_test.cc
+++ b/dexdump/dexdump_test.cc
@@ -31,7 +31,7 @@
 
 class DexDumpTest : public CommonRuntimeTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
     // Dogfood our own lib core dex file.
     dex_file_ = GetLibCoreDexFileNames()[0];
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 147af0c..838510b 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -26,33 +26,79 @@
         "dex_writer.cc",
     ],
     export_include_dirs: ["."],
-    shared_libs: [
-        "libbase",
-    ],
+    target: {
+        android: {
+            shared_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
+                "libprofile",
+                "libbase",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
+                "libprofile",
+                "libbase",
+            ],
+        },
+        windows: {
+            cflags: ["-Wno-thread-safety"],
+            static_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
+                "libprofile",
+                "libbase",
+            ],
+        },
+    },
     static_libs: ["libz"],
 }
 
+cc_defaults {
+    name: "libart-dexlayout_static_base_defaults",
+    static_libs: [
+        "libbase",
+        "libz",
+    ],
+}
+
 art_cc_library {
     name: "libart-dexlayout",
     defaults: [
         "libart-dexlayout-defaults",
         "dex2oat-pgo-defaults",
     ],
-    shared_libs: [
-        "libdexfile",
-        "libartbase",
-        "libprofile",
-    ],
-
     target: {
         android: {
             lto: {
                  thin: true,
             },
         },
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
     },
 }
 
+cc_defaults {
+    name: "libart-dexlayout_static_defaults",
+    defaults: [
+        "libart-dexlayout_static_base_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+        "libprofile_static_defaults",
+    ],
+    static_libs: ["libart-dexlayout"],
+}
+
 art_cc_library {
     name: "libartd-dexlayout",
     defaults: [
@@ -67,6 +113,17 @@
 }
 
 cc_defaults {
+    name: "libartd-dexlayout_static_defaults",
+    defaults: [
+        "libart-dexlayout_static_base_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+        "libprofiled_static_defaults",
+    ],
+    static_libs: ["libartd-dexlayout"],
+}
+
+cc_defaults {
     name: "dexlayout-defaults",
     defaults: ["art_defaults"],
     host_supported: true,
@@ -88,6 +145,29 @@
 }
 
 art_cc_binary {
+    name: "dexlayouts",
+    defaults: [
+        "dexlayout-defaults",
+        "libart-dexlayout_static_defaults",
+        "libprofile_static_defaults",
+        "libdexfile_static_defaults",
+        "libartbase_static_defaults",
+    ],
+    srcs: ["dexlayout_main.cc"],
+    host_supported: true,
+    device_supported: false,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+        windows: {
+            enabled: true,
+            cflags: ["-Wno-thread-safety"],
+        },
+    },
+}
+
+art_cc_binary {
     name: "dexlayoutd",
     defaults: [
         "art_debug_defaults",
@@ -128,7 +208,7 @@
     target: {
         android: {
             shared_libs: [
-                "libpagemap",
+                "libmeminfo",
             ],
         },
     },
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 00fb0af..a7dad07 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 CompactDexWriter::CompactDexWriter(DexLayout* dex_layout)
-    : DexWriter(dex_layout, /*compute_offsets*/ true) {
+    : DexWriter(dex_layout, /*compute_offsets=*/ true) {
   CHECK(GetCompactDexLevel() != CompactDexLevel::kCompactDexLevelNone);
 }
 
@@ -36,7 +36,7 @@
 
 CompactDexWriter::Container::Container(bool dedupe_code_items)
     : code_item_dedupe_(dedupe_code_items, &data_section_),
-      data_item_dedupe_(/*dedupe*/ true, &data_section_) {}
+      data_item_dedupe_(/*enabled=*/ true, &data_section_) {}
 
 uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
   const uint32_t start_offset = stream->Tell();
@@ -211,7 +211,7 @@
 
 CompactDexWriter::Deduper::Deduper(bool enabled, DexContainer::Section* section)
     : enabled_(enabled),
-      dedupe_map_(/*bucket_count*/ 32,
+      dedupe_map_(/*__n=*/ 32,
                   HashedMemoryRange::HashEqual(section),
                   HashedMemoryRange::HashEqual(section)) {}
 
@@ -406,16 +406,16 @@
   // Based on: https://source.android.com/devices/tech/dalvik/dex-format
   // Since the offsets may not be calculated already, the writing must be done in the correct order.
   const uint32_t string_ids_offset = main_stream->Tell();
-  WriteStringIds(main_stream, /*reserve_only*/ true);
+  WriteStringIds(main_stream, /*reserve_only=*/ true);
   WriteTypeIds(main_stream);
   const uint32_t proto_ids_offset = main_stream->Tell();
-  WriteProtoIds(main_stream, /*reserve_only*/ true);
+  WriteProtoIds(main_stream, /*reserve_only=*/ true);
   WriteFieldIds(main_stream);
   WriteMethodIds(main_stream);
   const uint32_t class_defs_offset = main_stream->Tell();
-  WriteClassDefs(main_stream, /*reserve_only*/ true);
+  WriteClassDefs(main_stream, /*reserve_only=*/ true);
   const uint32_t call_site_ids_offset = main_stream->Tell();
-  WriteCallSiteIds(main_stream, /*reserve_only*/ true);
+  WriteCallSiteIds(main_stream, /*reserve_only=*/ true);
   WriteMethodHandles(main_stream);
 
   if (compute_offsets_) {
@@ -426,7 +426,7 @@
 
   // Write code item first to minimize the space required for encoded methods.
   // For cdex, the code items don't depend on the debug info.
-  WriteCodeItems(data_stream, /*reserve_only*/ false);
+  WriteCodeItems(data_stream, /*reserve_only=*/ false);
 
   // Sort the debug infos by method index order, this reduces size by ~0.1% by reducing the size of
   // the debug info offset table.
@@ -441,23 +441,24 @@
   WriteTypeLists(data_stream);
   WriteClassDatas(data_stream);
   WriteStringDatas(data_stream);
+  WriteHiddenapiClassData(data_stream);
 
   // Write delayed id sections that depend on data sections.
   {
     Stream::ScopedSeek seek(main_stream, string_ids_offset);
-    WriteStringIds(main_stream, /*reserve_only*/ false);
+    WriteStringIds(main_stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(main_stream, proto_ids_offset);
-    WriteProtoIds(main_stream, /*reserve_only*/ false);
+    WriteProtoIds(main_stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(main_stream, class_defs_offset);
-    WriteClassDefs(main_stream, /*reserve_only*/ false);
+    WriteClassDefs(main_stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(main_stream, call_site_ids_offset);
-    WriteCallSiteIds(main_stream, /*reserve_only*/ false);
+    WriteCallSiteIds(main_stream, /*reserve_only=*/ false);
   }
 
   // Write the map list.
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 178a4d4..20ebc17 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -21,11 +21,11 @@
 
 #include <stdint.h>
 
-#include <map>
 #include <vector>
 
 #include "base/iteration_range.h"
 #include "base/leb128.h"
+#include "base/safe_map.h"
 #include "base/stl_util.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_types.h"
@@ -50,6 +50,7 @@
 class FieldId;
 class FieldItem;
 class Header;
+class HiddenapiClassData;
 class MapList;
 class MapItem;
 class MethodHandleItem;
@@ -101,6 +102,7 @@
   virtual void Dispatch(AnnotationSetItem* annotation_set_item) = 0;
   virtual void Dispatch(AnnotationSetRefList* annotation_set_ref_list) = 0;
   virtual void Dispatch(AnnotationsDirectoryItem* annotations_directory_item) = 0;
+  virtual void Dispatch(HiddenapiClassData* hiddenapi_class_data) = 0;
   virtual void Dispatch(MapList* map_list) = 0;
   virtual void Dispatch(MapItem* map_item) = 0;
 
@@ -215,7 +217,8 @@
 
   uint32_t GetOffset() const { return offset_; }
   void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
-  virtual uint32_t Size() const { return 0U; }
+  virtual uint32_t Size() const = 0;
+  bool Empty() const { return Size() == 0u; }
 
  private:
   // Start out unassigned.
@@ -476,6 +479,12 @@
   const CollectionVector<AnnotationsDirectoryItem>& AnnotationsDirectoryItems() const {
     return annotations_directory_items_;
   }
+  IndexedCollectionVector<HiddenapiClassData>& HiddenapiClassDatas() {
+    return hiddenapi_class_datas_;
+  }
+  const IndexedCollectionVector<HiddenapiClassData>& HiddenapiClassDatas() const {
+    return hiddenapi_class_datas_;
+  }
   CollectionVector<DebugInfoItem>& DebugInfoItems() { return debug_info_items_; }
   const CollectionVector<DebugInfoItem>& DebugInfoItems() const { return debug_info_items_; }
   CollectionVector<CodeItem>& CodeItems() { return code_items_; }
@@ -553,6 +562,7 @@
   IndexedCollectionVector<AnnotationSetItem> annotation_set_items_;
   IndexedCollectionVector<AnnotationSetRefList> annotation_set_ref_lists_;
   IndexedCollectionVector<AnnotationsDirectoryItem> annotations_directory_items_;
+  IndexedCollectionVector<HiddenapiClassData> hiddenapi_class_datas_;
   // The order of the vectors controls the layout of the output file by index order, to change the
   // layout just sort the vector. Note that you may only change the order of the non indexed vectors
   // below. Indexed vectors are accessed by indices in other places, changing the sorting order will
@@ -1264,6 +1274,49 @@
   DISALLOW_COPY_AND_ASSIGN(MethodHandleItem);
 };
 
+using HiddenapiFlagsMap = SafeMap<const Item*, uint32_t>;
+
+class HiddenapiClassData : public IndexedItem {
+ public:
+  HiddenapiClassData(const ClassDef* class_def, std::unique_ptr<HiddenapiFlagsMap> flags)
+      : class_def_(class_def), flags_(std::move(flags)) { }
+  ~HiddenapiClassData() override { }
+
+  const ClassDef* GetClassDef() const { return class_def_; }
+
+  uint32_t GetFlags(const Item* field_or_method_item) const {
+    return (flags_ == nullptr) ? 0u : flags_->Get(field_or_method_item);
+  }
+
+  static uint32_t GetFlags(Header* header, ClassDef* class_def, const Item* field_or_method_item) {
+    DCHECK(header != nullptr);
+    DCHECK(class_def != nullptr);
+    return (header->HiddenapiClassDatas().Empty())
+        ? 0u
+        : header->HiddenapiClassDatas()[class_def->GetIndex()]->GetFlags(field_or_method_item);
+  }
+
+  uint32_t ItemSize() const {
+    uint32_t size = 0u;
+    bool has_non_zero_entries = false;
+    if (flags_ != nullptr) {
+      for (const auto& entry : *flags_) {
+        size += UnsignedLeb128Size(entry.second);
+        has_non_zero_entries |= (entry.second != 0u);
+      }
+    }
+    return has_non_zero_entries ? size : 0u;
+  }
+
+  void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+  const ClassDef* class_def_;
+  std::unique_ptr<HiddenapiFlagsMap> flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(HiddenapiClassData);
+};
+
 // TODO(sehr): implement MapList.
 class MapList : public Item {
  public:
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index ca6ff9e..f4195b2 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -17,6 +17,7 @@
  */
 
 #include <stdint.h>
+#include <memory>
 #include <vector>
 
 #include "dex_ir_builder.h"
@@ -115,6 +116,8 @@
     return it != collection_.end() ? it->second : nullptr;
   }
 
+  uint32_t Size() const override { return size(); }
+
   // Lower case for template interop with std::map.
   uint32_t size() const { return collection_.size(); }
   std::map<uint32_t, T*>& Collection() { return collection_; }
@@ -149,25 +152,26 @@
 
   void CreateCallSitesAndMethodHandles(const DexFile& dex_file);
 
-  TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
+  TypeList* CreateTypeList(const dex::TypeList* type_list, uint32_t offset);
   EncodedArrayItem* CreateEncodedArrayItem(const DexFile& dex_file,
                                            const uint8_t* static_data,
                                            uint32_t offset);
   AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
-                                       const DexFile::AnnotationItem* annotation);
+                                       const dex::AnnotationItem* annotation);
   AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
-      const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
+      const dex::AnnotationSetItem* disk_annotations_item, uint32_t offset);
   AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
-      const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
+      const dex::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
   CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
-                                   const DexFile::CodeItem* disk_code_item,
+                                   const dex::CodeItem* disk_code_item,
                                    uint32_t offset,
                                    uint32_t dex_method_index);
-  ClassData* CreateClassData(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+  ClassData* CreateClassData(const DexFile& dex_file, const dex::ClassDef& class_def);
 
   void AddAnnotationsFromMapListSection(const DexFile& dex_file,
                                         uint32_t start_offset,
                                         uint32_t count);
+  void AddHiddenapiClassDataFromMapListSection(const DexFile& dex_file, uint32_t offset);
 
   void CheckAndSetRemainingOffsets(const DexFile& dex_file, const Options& options);
 
@@ -203,7 +207,7 @@
   ParameterAnnotation* GenerateParameterAnnotation(
       const DexFile& dex_file,
       MethodId* method_id,
-      const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+      const dex::AnnotationSetRefList* annotation_set_ref_list,
       uint32_t offset);
 
   template <typename Type, class... Args>
@@ -296,7 +300,7 @@
     if (!options.class_filter_.empty()) {
       // If the filter is enabled (not empty), filter out classes that don't have a matching
       // descriptor.
-      const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+      const dex::ClassDef& class_def = dex_file.GetClassDef(i);
       const char* descriptor = dex_file.GetClassDescriptor(class_def);
       if (options.class_filter_.find(descriptor) == options.class_filter_.end()) {
         continue;
@@ -327,10 +331,10 @@
 void BuilderMaps::CheckAndSetRemainingOffsets(const DexFile& dex_file, const Options& options) {
   const DexFile::Header& disk_header = dex_file.GetHeader();
   // Read MapItems and validate/set remaining offsets.
-  const DexFile::MapList* map = dex_file.GetMapList();
+  const dex::MapList* map = dex_file.GetMapList();
   const uint32_t count = map->size_;
   for (uint32_t i = 0; i < count; ++i) {
-    const DexFile::MapItem* item = map->list_ + i;
+    const dex::MapItem* item = map->list_ + i;
     switch (item->type_) {
       case DexFile::kDexTypeHeaderItem:
         CHECK_EQ(item->size_, 1u);
@@ -406,6 +410,10 @@
       case DexFile::kDexTypeAnnotationsDirectoryItem:
         header_->AnnotationsDirectoryItems().SetOffset(item->offset_);
         break;
+      case DexFile::kDexTypeHiddenapiClassData:
+        header_->HiddenapiClassDatas().SetOffset(item->offset_);
+        AddHiddenapiClassDataFromMapListSection(dex_file, item->offset_);
+        break;
       default:
         LOG(ERROR) << "Unknown map list item type.";
     }
@@ -413,7 +421,7 @@
 }
 
 void BuilderMaps::CreateStringId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
+  const dex::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
   StringData* string_data =
       string_datas_map_.CreateAndAddItem(header_->StringDatas(),
                                          eagerly_assign_offsets_,
@@ -426,7 +434,7 @@
 }
 
 void BuilderMaps::CreateTypeId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
+  const dex::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
   CreateAndAddIndexedItem(header_->TypeIds(),
                           header_->TypeIds().GetOffset() + i * TypeId::ItemSize(),
                           i,
@@ -434,8 +442,8 @@
 }
 
 void BuilderMaps::CreateProtoId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
-  const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
+  const dex::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
+  const dex::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
   TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
 
   CreateAndAddIndexedItem(header_->ProtoIds(),
@@ -447,7 +455,7 @@
 }
 
 void BuilderMaps::CreateFieldId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
+  const dex::FieldId& disk_field_id = dex_file.GetFieldId(i);
   CreateAndAddIndexedItem(header_->FieldIds(),
                           header_->FieldIds().GetOffset() + i * FieldId::ItemSize(),
                           i,
@@ -457,7 +465,7 @@
 }
 
 void BuilderMaps::CreateMethodId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
+  const dex::MethodId& disk_method_id = dex_file.GetMethodId(i);
   CreateAndAddIndexedItem(header_->MethodIds(),
                           header_->MethodIds().GetOffset() + i * MethodId::ItemSize(),
                           i,
@@ -467,19 +475,19 @@
 }
 
 void BuilderMaps::CreateClassDef(const DexFile& dex_file, uint32_t i) {
-  const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
+  const dex::ClassDef& disk_class_def = dex_file.GetClassDef(i);
   const TypeId* class_type = header_->TypeIds()[disk_class_def.class_idx_.index_];
   uint32_t access_flags = disk_class_def.access_flags_;
   const TypeId* superclass = header_->GetTypeIdOrNullPtr(disk_class_def.superclass_idx_.index_);
 
-  const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
+  const dex::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
   TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_);
 
   const StringId* source_file =
       header_->GetStringIdOrNullPtr(disk_class_def.source_file_idx_.index_);
   // Annotations.
   AnnotationsDirectoryItem* annotations = nullptr;
-  const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+  const dex::AnnotationsDirectoryItem* disk_annotations_directory_item =
       dex_file.GetAnnotationsDirectory(disk_class_def);
   if (disk_annotations_directory_item != nullptr) {
     annotations = CreateAnnotationsDirectoryItem(
@@ -504,7 +512,7 @@
 }
 
 void BuilderMaps::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
+  const dex::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
   const uint8_t* disk_call_item_ptr = dex_file.DataBegin() + disk_call_site_id.data_off_;
   EncodedArrayItem* call_site_item =
       CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
@@ -516,7 +524,7 @@
 }
 
 void BuilderMaps::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
-  const DexFile::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
+  const dex::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
   uint16_t index = disk_method_handle.field_or_method_idx_;
   DexFile::MethodHandleType type =
       static_cast<DexFile::MethodHandleType>(disk_method_handle.method_handle_type_);
@@ -543,9 +551,9 @@
 
 void BuilderMaps::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
   // Iterate through the map list and set the offset of the CallSiteIds and MethodHandleItems.
-  const DexFile::MapList* map = dex_file.GetMapList();
+  const dex::MapList* map = dex_file.GetMapList();
   for (uint32_t i = 0; i < map->size_; ++i) {
-    const DexFile::MapItem* item = map->list_ + i;
+    const dex::MapItem* item = map->list_ + i;
     switch (item->type_) {
       case DexFile::kDexTypeCallSiteIdItem:
         header_->CallSiteIds().SetOffset(item->offset_);
@@ -567,7 +575,7 @@
   }
 }
 
-TypeList* BuilderMaps::CreateTypeList(const DexFile::TypeList* dex_type_list, uint32_t offset) {
+TypeList* BuilderMaps::CreateTypeList(const dex::TypeList* dex_type_list, uint32_t offset) {
   if (dex_type_list == nullptr) {
     return nullptr;
   }
@@ -615,15 +623,53 @@
   uint32_t current_offset = start_offset;
   for (size_t i = 0; i < count; ++i) {
     // Annotation that we didn't process already, add it to the set.
-    const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
+    const dex::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
     AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
     DCHECK(annotation_item != nullptr);
     current_offset += annotation_item->GetSize();
   }
 }
 
+void BuilderMaps::AddHiddenapiClassDataFromMapListSection(const DexFile& dex_file,
+                                                          uint32_t offset) {
+  const dex::HiddenapiClassData* hiddenapi_class_data =
+      dex_file.GetHiddenapiClassDataAtOffset(offset);
+  DCHECK(hiddenapi_class_data == dex_file.GetHiddenapiClassData());
+
+  for (auto& class_def : header_->ClassDefs()) {
+    uint32_t index = class_def->GetIndex();
+    ClassData* class_data = class_def->GetClassData();
+    const uint8_t* ptr = hiddenapi_class_data->GetFlagsPointer(index);
+
+    std::unique_ptr<HiddenapiFlagsMap> flags = nullptr;
+    if (ptr != nullptr) {
+      DCHECK(class_data != nullptr);
+      flags = std::make_unique<HiddenapiFlagsMap>();
+      for (const dex_ir::FieldItem& field : *class_data->StaticFields()) {
+        flags->emplace(&field, DecodeUnsignedLeb128(&ptr));
+      }
+      for (const dex_ir::FieldItem& field : *class_data->InstanceFields()) {
+        flags->emplace(&field, DecodeUnsignedLeb128(&ptr));
+      }
+      for (const dex_ir::MethodItem& method : *class_data->DirectMethods()) {
+        flags->emplace(&method, DecodeUnsignedLeb128(&ptr));
+      }
+      for (const dex_ir::MethodItem& method : *class_data->VirtualMethods()) {
+        flags->emplace(&method, DecodeUnsignedLeb128(&ptr));
+      }
+    }
+
+    CreateAndAddIndexedItem(header_->HiddenapiClassDatas(),
+                            header_->HiddenapiClassDatas().GetOffset() +
+                                hiddenapi_class_data->flags_offset_[index],
+                            index,
+                            class_def.get(),
+                            std::move(flags));
+  }
+}
+
 AnnotationItem* BuilderMaps::CreateAnnotationItem(const DexFile& dex_file,
-                                                  const DexFile::AnnotationItem* annotation) {
+                                                  const dex::AnnotationItem* annotation) {
   const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
   const uint32_t offset = start_data - dex_file.DataBegin();
   AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
@@ -645,7 +691,7 @@
 
 
 AnnotationSetItem* BuilderMaps::CreateAnnotationSetItem(const DexFile& dex_file,
-    const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
+    const dex::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
   if (disk_annotations_item == nullptr || (disk_annotations_item->size_ == 0 && offset == 0)) {
     return nullptr;
   }
@@ -653,7 +699,7 @@
   if (annotation_set_item == nullptr) {
     std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
     for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
-      const DexFile::AnnotationItem* annotation =
+      const dex::AnnotationItem* annotation =
           dex_file.GetAnnotationItem(disk_annotations_item, i);
       if (annotation == nullptr) {
         continue;
@@ -671,59 +717,59 @@
 }
 
 AnnotationsDirectoryItem* BuilderMaps::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
-    const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
+    const dex::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
   AnnotationsDirectoryItem* annotations_directory_item =
       annotations_directory_items_map_.GetExistingObject(offset);
   if (annotations_directory_item != nullptr) {
     return annotations_directory_item;
   }
-  const DexFile::AnnotationSetItem* class_set_item =
+  const dex::AnnotationSetItem* class_set_item =
       dex_file.GetClassAnnotationSet(disk_annotations_item);
   AnnotationSetItem* class_annotation = nullptr;
   if (class_set_item != nullptr) {
     uint32_t item_offset = disk_annotations_item->class_annotations_off_;
     class_annotation = CreateAnnotationSetItem(dex_file, class_set_item, item_offset);
   }
-  const DexFile::FieldAnnotationsItem* fields =
+  const dex::FieldAnnotationsItem* fields =
       dex_file.GetFieldAnnotations(disk_annotations_item);
   FieldAnnotationVector* field_annotations = nullptr;
   if (fields != nullptr) {
     field_annotations = new FieldAnnotationVector();
     for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
       FieldId* field_id = header_->FieldIds()[fields[i].field_idx_];
-      const DexFile::AnnotationSetItem* field_set_item =
+      const dex::AnnotationSetItem* field_set_item =
           dex_file.GetFieldAnnotationSetItem(fields[i]);
       uint32_t annotation_set_offset = fields[i].annotations_off_;
       AnnotationSetItem* annotation_set_item =
           CreateAnnotationSetItem(dex_file, field_set_item, annotation_set_offset);
-      field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
-          new FieldAnnotation(field_id, annotation_set_item)));
+      field_annotations->push_back(std::make_unique<FieldAnnotation>(
+          field_id, annotation_set_item));
     }
   }
-  const DexFile::MethodAnnotationsItem* methods =
+  const dex::MethodAnnotationsItem* methods =
       dex_file.GetMethodAnnotations(disk_annotations_item);
   MethodAnnotationVector* method_annotations = nullptr;
   if (methods != nullptr) {
     method_annotations = new MethodAnnotationVector();
     for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
       MethodId* method_id = header_->MethodIds()[methods[i].method_idx_];
-      const DexFile::AnnotationSetItem* method_set_item =
+      const dex::AnnotationSetItem* method_set_item =
           dex_file.GetMethodAnnotationSetItem(methods[i]);
       uint32_t annotation_set_offset = methods[i].annotations_off_;
       AnnotationSetItem* annotation_set_item =
           CreateAnnotationSetItem(dex_file, method_set_item, annotation_set_offset);
-      method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
-          new MethodAnnotation(method_id, annotation_set_item)));
+      method_annotations->push_back(std::make_unique<MethodAnnotation>(
+          method_id, annotation_set_item));
     }
   }
-  const DexFile::ParameterAnnotationsItem* parameters =
+  const dex::ParameterAnnotationsItem* parameters =
       dex_file.GetParameterAnnotations(disk_annotations_item);
   ParameterAnnotationVector* parameter_annotations = nullptr;
   if (parameters != nullptr) {
     parameter_annotations = new ParameterAnnotationVector();
     for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
       MethodId* method_id = header_->MethodIds()[parameters[i].method_idx_];
-      const DexFile::AnnotationSetRefList* list =
+      const dex::AnnotationSetRefList* list =
           dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
       parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
           GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_)));
@@ -740,7 +786,7 @@
 }
 
 CodeItem* BuilderMaps::DedupeOrCreateCodeItem(const DexFile& dex_file,
-                                              const DexFile::CodeItem* disk_code_item,
+                                              const dex::CodeItem* disk_code_item,
                                               uint32_t offset,
                                               uint32_t dex_method_index) {
   if (disk_code_item == nullptr) {
@@ -781,7 +827,7 @@
   if (accessor.TriesSize() > 0) {
     tries = new TryItemVector();
     handler_list = new CatchHandlerVector();
-    for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
+    for (const dex::TryItem& disk_try_item : accessor.TryItems()) {
       uint32_t start_addr = disk_try_item.start_addr_;
       uint16_t insn_count = disk_try_item.insn_count_;
       uint16_t handler_off = disk_try_item.handler_off_;
@@ -895,7 +941,7 @@
 }
 
 ClassData* BuilderMaps::CreateClassData(const DexFile& dex_file,
-                                        const DexFile::ClassDef& class_def) {
+                                        const dex::ClassDef& class_def) {
   // Read the fields and methods defined by the class, resolving the circular reference from those
   // to classes by setting class at the same time.
   const uint32_t offset = class_def.class_data_off_;
@@ -906,13 +952,13 @@
     FieldItemVector* static_fields = new FieldItemVector();
     for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
       FieldId* field_item = header_->FieldIds()[field.GetIndex()];
-      uint32_t access_flags = field.GetRawAccessFlags();
+      uint32_t access_flags = field.GetAccessFlags();
       static_fields->emplace_back(access_flags, field_item);
     }
     FieldItemVector* instance_fields = new FieldItemVector();
     for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
       FieldId* field_item = header_->FieldIds()[field.GetIndex()];
-      uint32_t access_flags = field.GetRawAccessFlags();
+      uint32_t access_flags = field.GetAccessFlags();
       instance_fields->emplace_back(access_flags, field_item);
     }
     // Direct methods.
@@ -1158,9 +1204,9 @@
       // Decode all name=value pairs.
       for (uint32_t i = 0; i < size; i++) {
         const uint32_t name_index = DecodeUnsignedLeb128(data);
-        elements->push_back(std::unique_ptr<AnnotationElement>(
-            new AnnotationElement(header_->StringIds()[name_index],
-                                  ReadEncodedValue(dex_file, data))));
+        elements->push_back(std::make_unique<AnnotationElement>(
+            header_->StringIds()[name_index],
+            ReadEncodedValue(dex_file, data)));
       }
       item->SetEncodedAnnotation(new EncodedAnnotation(header_->TypeIds()[type_idx], elements));
       break;
@@ -1178,8 +1224,8 @@
 MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file,
                                            const ClassAccessor::Method& method) {
   MethodId* method_id = header_->MethodIds()[method.GetIndex()];
-  uint32_t access_flags = method.GetRawAccessFlags();
-  const DexFile::CodeItem* disk_code_item = method.GetCodeItem();
+  uint32_t access_flags = method.GetAccessFlags();
+  const dex::CodeItem* disk_code_item = method.GetCodeItem();
   // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
   // they may have different debug info streams.
   CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
@@ -1192,13 +1238,13 @@
 ParameterAnnotation* BuilderMaps::GenerateParameterAnnotation(
     const DexFile& dex_file,
     MethodId* method_id,
-    const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+    const dex::AnnotationSetRefList* annotation_set_ref_list,
     uint32_t offset) {
   AnnotationSetRefList* set_ref_list = annotation_set_ref_lists_map_.GetExistingObject(offset);
   if (set_ref_list == nullptr) {
     std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
     for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
-      const DexFile::AnnotationSetItem* annotation_set_item =
+      const dex::AnnotationSetItem* annotation_set_item =
           dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
       uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_;
       annotations->push_back(CreateAnnotationSetItem(dex_file, annotation_set_item, set_offset));
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 4a36744..27cec8d9 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -53,7 +53,7 @@
 
   bool OpenAndPrintHeader(size_t dex_index) {
     // Open the file and emit the gnuplot prologue.
-    out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "w");
+    out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "we");
     if (out_file_ == nullptr) {
       return false;
     }
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index a4c5cda..268abe4 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -462,6 +462,60 @@
   }
 }
 
+void DexWriter::WriteHiddenapiClassData(Stream* stream) {
+  if (header_->HiddenapiClassDatas().Empty()) {
+    return;
+  }
+  DCHECK_EQ(header_->HiddenapiClassDatas().Size(), header_->ClassDefs().Size());
+
+  stream->AlignTo(SectionAlignment(DexFile::kDexTypeHiddenapiClassData));
+  ProcessOffset(stream, &header_->HiddenapiClassDatas());
+  const uint32_t start = stream->Tell();
+
+  // Compute offsets for each class def and write the header.
+  // data_header[0]: total size of the section
+  // data_header[i + 1]: offset of class def[i] from the beginning of the section,
+  //                     or zero if no data
+  std::vector<uint32_t> data_header(header_->ClassDefs().Size() + 1, 0);
+  data_header[0] = sizeof(uint32_t) * (header_->ClassDefs().Size() + 1);
+  for (uint32_t i = 0; i < header_->ClassDefs().Size(); ++i) {
+    uint32_t item_size = header_->HiddenapiClassDatas()[i]->ItemSize();
+    data_header[i + 1] = item_size == 0u ? 0 : data_header[0];
+    data_header[0] += item_size;
+  }
+  stream->Write(data_header.data(), sizeof(uint32_t) * data_header.size());
+
+  // Write class data streams.
+  for (uint32_t i = 0; i < header_->ClassDefs().Size(); ++i) {
+    dex_ir::ClassDef* class_def = header_->ClassDefs()[i];
+    const auto& item = header_->HiddenapiClassDatas()[i];
+    DCHECK(item->GetClassDef() == class_def);
+
+    if (data_header[i + 1] != 0u) {
+      dex_ir::ClassData* class_data = class_def->GetClassData();
+      DCHECK(class_data != nullptr);
+      DCHECK_EQ(data_header[i + 1], stream->Tell() - start);
+      for (const dex_ir::FieldItem& field : *class_data->StaticFields()) {
+        stream->WriteUleb128(item->GetFlags(&field));
+      }
+      for (const dex_ir::FieldItem& field : *class_data->InstanceFields()) {
+        stream->WriteUleb128(item->GetFlags(&field));
+      }
+      for (const dex_ir::MethodItem& method : *class_data->DirectMethods()) {
+        stream->WriteUleb128(item->GetFlags(&method));
+      }
+      for (const dex_ir::MethodItem& method : *class_data->VirtualMethods()) {
+        stream->WriteUleb128(item->GetFlags(&method));
+      }
+    }
+  }
+  DCHECK_EQ(stream->Tell() - start, data_header[0]);
+
+  if (compute_offsets_ && start != stream->Tell()) {
+    header_->HiddenapiClassDatas().SetOffset(start);
+  }
+}
+
 void DexWriter::WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) {
   stream->AlignTo(SectionAlignment(DexFile::kDexTypeDebugInfoItem));
   ProcessOffset(stream, debug_info);
@@ -482,10 +536,10 @@
                                                  dex_ir::CodeItem* code_item,
                                                  bool reserve_only) {
   if (code_item->TriesSize() != 0) {
-    stream->AlignTo(DexFile::TryItem::kAlignment);
+    stream->AlignTo(dex::TryItem::kAlignment);
     // Write try items.
     for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
-      DexFile::TryItem disk_try_item;
+      dex::TryItem disk_try_item;
       if (!reserve_only) {
         disk_try_item.start_addr_ = try_item->StartAddr();
         disk_try_item.insn_count_ = try_item->InsnCount();
@@ -659,7 +713,7 @@
   stream->Write(&map_list_size, sizeof(map_list_size));
   while (!queue->empty()) {
     const MapItem& item = queue->top();
-    DexFile::MapItem map_item;
+    dex::MapItem map_item;
     map_item.type_ = item.type_;
     map_item.size_ = item.size_;
     map_item.offset_ = item.offset_;
@@ -730,6 +784,9 @@
   queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationsDirectoryItem,
                               header_->AnnotationsDirectoryItems().Size(),
                               header_->AnnotationsDirectoryItems().GetOffset()));
+  queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeHiddenapiClassData,
+                              header_->HiddenapiClassDatas().Empty() ? 0u : 1u,
+                              header_->HiddenapiClassDatas().GetOffset()));
   WriteMapItems(stream, &queue);
 }
 
@@ -790,16 +847,16 @@
   // Based on: https://source.android.com/devices/tech/dalvik/dex-format
   // Since the offsets may not be calculated already, the writing must be done in the correct order.
   const uint32_t string_ids_offset = stream->Tell();
-  WriteStringIds(stream, /*reserve_only*/ true);
+  WriteStringIds(stream, /*reserve_only=*/ true);
   WriteTypeIds(stream);
   const uint32_t proto_ids_offset = stream->Tell();
-  WriteProtoIds(stream, /*reserve_only*/ true);
+  WriteProtoIds(stream, /*reserve_only=*/ true);
   WriteFieldIds(stream);
   WriteMethodIds(stream);
   const uint32_t class_defs_offset = stream->Tell();
-  WriteClassDefs(stream, /*reserve_only*/ true);
+  WriteClassDefs(stream, /*reserve_only=*/ true);
   const uint32_t call_site_ids_offset = stream->Tell();
-  WriteCallSiteIds(stream, /*reserve_only*/ true);
+  WriteCallSiteIds(stream, /*reserve_only=*/ true);
   WriteMethodHandles(stream);
 
   uint32_t data_offset_ = 0u;
@@ -812,13 +869,13 @@
   // Write code item first to minimize the space required for encoded methods.
   // Reserve code item space since we need the debug offsets to actually write them.
   const uint32_t code_items_offset = stream->Tell();
-  WriteCodeItems(stream, /*reserve_only*/ true);
+  WriteCodeItems(stream, /*reserve_only=*/ true);
   // Write debug info section.
   WriteDebugInfoItems(stream);
   {
     // Actually write code items since debug info offsets are calculated now.
     Stream::ScopedSeek seek(stream, code_items_offset);
-    WriteCodeItems(stream, /*reserve_only*/ false);
+    WriteCodeItems(stream, /*reserve_only=*/ false);
   }
 
   WriteEncodedArrays(stream);
@@ -829,23 +886,24 @@
   WriteTypeLists(stream);
   WriteClassDatas(stream);
   WriteStringDatas(stream);
+  WriteHiddenapiClassData(stream);
 
   // Write delayed id sections that depend on data sections.
   {
     Stream::ScopedSeek seek(stream, string_ids_offset);
-    WriteStringIds(stream, /*reserve_only*/ false);
+    WriteStringIds(stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(stream, proto_ids_offset);
-    WriteProtoIds(stream, /*reserve_only*/ false);
+    WriteProtoIds(stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(stream, class_defs_offset);
-    WriteClassDefs(stream, /*reserve_only*/ false);
+    WriteClassDefs(stream, /*reserve_only=*/ false);
   }
   {
     Stream::ScopedSeek seek(stream, call_site_ids_offset);
-    WriteCallSiteIds(stream, /*reserve_only*/ false);
+    WriteCallSiteIds(stream, /*reserve_only=*/ false);
   }
 
   // Write the map list.
@@ -932,6 +990,15 @@
   }
 }
 
+void DexWriter::ProcessOffset(Stream* stream, dex_ir::CollectionBase* item) {
+  if (compute_offsets_) {
+    item->SetOffset(stream->Tell());
+  } else {
+    // Not computing offsets, just use the one in the item.
+    stream->Seek(item->GetOffset());
+  }
+}
+
 std::unique_ptr<DexContainer> DexWriter::CreateDexContainer() const {
   return std::unique_ptr<DexContainer>(new DexWriter::Container);
 }
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index dd2ebad..62247ec 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -257,6 +257,7 @@
   void WriteStringDatas(Stream* stream);
   void WriteClassDatas(Stream* stream);
   void WriteMethodHandles(Stream* stream);
+  void WriteHiddenapiClassData(Stream* stream);
   void WriteMapItems(Stream* stream, MapItemQueue* queue);
   void GenerateAndWriteMapItems(Stream* stream);
 
@@ -270,6 +271,7 @@
   // Process an offset, if compute_offset is set, write into the dex ir item, otherwise read the
   // existing offset and use that for writing.
   void ProcessOffset(Stream* stream, dex_ir::Item* item);
+  void ProcessOffset(Stream* stream, dex_ir::CollectionBase* item);
 
   dex_ir::Header* const header_;
   DexLayout* const dex_layout_;
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 493a8a2..28d4048 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -34,13 +34,18 @@
 #include "dex_ir.h"
 #include "dex_ir_builder.h"
 #ifdef ART_TARGET_ANDROID
-#include "pagemap/pagemap.h"
+#include <meminfo/pageacct.h>
+#include <meminfo/procmeminfo.h>
 #endif
 #include "vdex_file.h"
 
 namespace art {
 
 using android::base::StringPrintf;
+#ifdef ART_TARGET_ANDROID
+using android::meminfo::ProcMemInfo;
+using android::meminfo::Vma;
+#endif
 
 static bool g_verbose = false;
 
@@ -194,7 +199,7 @@
   return DexFile::kDexTypeHeaderItem;
 }
 
-static void ProcessPageMap(uint64_t* pagemap,
+static void ProcessPageMap(const std::vector<uint64_t>& pagemap,
                            size_t start,
                            size_t end,
                            const std::vector<dex_ir::DexFileSection>& sections,
@@ -202,7 +207,7 @@
   static constexpr size_t kLineLength = 32;
   for (size_t page = start; page < end; ++page) {
     char type_char = '.';
-    if (PM_PAGEMAP_PRESENT(pagemap[page])) {
+    if (::android::meminfo::page_present(pagemap[page])) {
       const size_t dex_page_offset = page - start;
       uint16_t type = FindSectionTypeForPage(dex_page_offset, sections);
       page_counts->Increment(type);
@@ -265,7 +270,7 @@
   printer->PrintSkipLine();
 }
 
-static void ProcessOneDexMapping(uint64_t* pagemap,
+static void ProcessOneDexMapping(const std::vector<uint64_t>& pagemap,
                                  uint64_t map_start,
                                  const DexFile* dex_file,
                                  uint64_t vdex_start,
@@ -294,7 +299,7 @@
   {
     Options options;
     std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file,
-                                                                /*eagerly_assign_offsets*/ true,
+                                                                /*eagerly_assign_offsets=*/ true,
                                                                 options));
     sections = dex_ir::GetSortedDexFileSections(header.get(),
                                                 dex_ir::SortDirection::kSortDescending);
@@ -316,14 +321,14 @@
   return false;
 }
 
-static bool DisplayMappingIfFromVdexFile(pm_map_t* map, Printer* printer) {
-  std::string vdex_name = pm_map_name(map);
+static bool DisplayMappingIfFromVdexFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) {
+  std::string vdex_name = vma.name;
   // Extract all the dex files from the vdex file.
   std::string error_msg;
   std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_name,
-                                                false /*writeable*/,
-                                                false /*low_4gb*/,
-                                                false /*unquicken */,
+                                                /*writable=*/ false,
+                                                /*low_4gb=*/ false,
+                                                /*unquicken=*/ false,
                                                 &error_msg /*out*/));
   if (vdex == nullptr) {
     std::cerr << "Could not open vdex file "
@@ -344,34 +349,33 @@
     return false;
   }
   // Open the page mapping (one uint64_t per page) for the entire vdex mapping.
-  uint64_t* pagemap;
-  size_t len;
-  if (pm_map_pagemap(map, &pagemap, &len) != 0) {
+  std::vector<uint64_t> pagemap;
+  if (!proc.PageMap(vma, &pagemap)) {
     std::cerr << "Error creating pagemap." << std::endl;
     return false;
   }
   // Process the dex files.
   std::cout << "MAPPING "
-            << pm_map_name(map)
-            << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map))
+            << vma.name
+            << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end)
             << std::endl;
   for (const auto& dex_file : dex_files) {
     ProcessOneDexMapping(pagemap,
-                         pm_map_start(map),
+                         vma.start,
                          dex_file.get(),
                          reinterpret_cast<uint64_t>(vdex->Begin()),
                          printer);
   }
-  free(pagemap);
   return true;
 }
 
-static void ProcessOneOatMapping(uint64_t* pagemap, size_t size, Printer* printer) {
+static void ProcessOneOatMapping(const std::vector<uint64_t>& pagemap,
+                                 Printer* printer) {
   static constexpr size_t kLineLength = 32;
   size_t resident_page_count = 0;
-  for (size_t page = 0; page < size; ++page) {
+  for (size_t page = 0; page < pagemap.size(); ++page) {
     char type_char = '.';
-    if (PM_PAGEMAP_PRESENT(pagemap[page])) {
+    if (::android::meminfo::page_present(pagemap[page])) {
       ++resident_page_count;
       type_char = '*';
     }
@@ -383,13 +387,13 @@
     }
   }
   if (g_verbose) {
-    if (size % kLineLength != 0) {
+    if (pagemap.size() % kLineLength != 0) {
       std::cout << std::endl;
     }
   }
-  double percent_of_total = 100.0 * resident_page_count / size;
+  double percent_of_total = 100.0 * resident_page_count / pagemap.size();
   printer->PrintHeader();
-  printer->PrintOne("EXECUTABLE", resident_page_count, size, percent_of_total, percent_of_total);
+  printer->PrintOne("EXECUTABLE", resident_page_count, pagemap.size(), percent_of_total, percent_of_total);
   printer->PrintSkipLine();
 }
 
@@ -405,21 +409,19 @@
   return false;
 }
 
-static bool DisplayMappingIfFromOatFile(pm_map_t* map, Printer* printer) {
+static bool DisplayMappingIfFromOatFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) {
   // Open the page mapping (one uint64_t per page) for the entire vdex mapping.
-  uint64_t* pagemap;
-  size_t len;
-  if (pm_map_pagemap(map, &pagemap, &len) != 0) {
+  std::vector<uint64_t> pagemap;
+  if (!proc.PageMap(vma, &pagemap)) {
     std::cerr << "Error creating pagemap." << std::endl;
     return false;
   }
   // Process the dex files.
   std::cout << "MAPPING "
-            << pm_map_name(map)
-            << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map))
+            << vma.name
+            << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end)
             << std::endl;
-  ProcessOneOatMapping(pagemap, len, printer);
-  free(pagemap);
+  ProcessOneOatMapping(pagemap, printer);
   return true;
 }
 
@@ -488,27 +490,11 @@
     return EXIT_FAILURE;
   }
 
-  // get libpagemap kernel information.
-  pm_kernel_t* ker;
-  if (pm_kernel_create(&ker) != 0) {
-    std::cerr << "Error creating kernel interface -- does this kernel have pagemap?" << std::endl;
-    return EXIT_FAILURE;
-  }
-
-  // get libpagemap process information.
-  pm_process_t* proc;
-  if (pm_process_create(ker, pid, &proc) != 0) {
-    std::cerr << "Error creating process interface -- does process "
-              << pid
-              << " really exist?"
-              << std::endl;
-    return EXIT_FAILURE;
-  }
-
+  // get libmeminfo process information.
+  ProcMemInfo proc(pid);
   // Get the set of mappings by the specified process.
-  pm_map_t** maps;
-  size_t num_maps;
-  if (pm_process_maps(proc, &maps, &num_maps) != 0) {
+  const std::vector<Vma>& maps = proc.Maps();
+  if (maps.empty()) {
     std::cerr << "Error listing maps." << std::endl;
     return EXIT_FAILURE;
   }
@@ -516,19 +502,19 @@
   bool match_found = false;
   // Process the mappings that are due to vdex or oat files.
   Printer printer;
-  for (size_t i = 0; i < num_maps; ++i) {
-    std::string mapped_file_name = pm_map_name(maps[i]);
+  for (auto& vma : maps) {
+    std::string mapped_file_name = vma.name;
     // Filter by name contains options (if any).
     if (!FilterByNameContains(mapped_file_name, name_filters)) {
       continue;
     }
     if (IsVdexFileMapping(mapped_file_name)) {
-      if (!DisplayMappingIfFromVdexFile(maps[i], &printer)) {
+      if (!DisplayMappingIfFromVdexFile(proc, vma, &printer)) {
         return EXIT_FAILURE;
       }
       match_found = true;
     } else if (IsOatFileMapping(mapped_file_name)) {
-      if (!DisplayMappingIfFromOatFile(maps[i], &printer)) {
+      if (!DisplayMappingIfFromOatFile(proc, vma, &printer)) {
         return EXIT_FAILURE;
       }
       match_found = true;
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 60dd7e4..47ef0a5 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -34,7 +34,7 @@
 
 class DexDiagTest : public CommonRuntimeTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
   }
 
@@ -68,14 +68,13 @@
     EXPECT_TRUE(!oat_location.empty());
     std::cout << "==" << oat_location << std::endl;
     std::string error_msg;
-    std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
                                                oat_location.c_str(),
                                                oat_location.c_str(),
-                                               /* requested_base */ nullptr,
-                                               /* executable */ false,
-                                               /* low_4gb */ false,
-                                               /* abs_dex_location */ nullptr,
-                                               /* reservation */ nullptr,
+                                               /*executable=*/ false,
+                                               /*low_4gb=*/ false,
+                                               /*abs_dex_location=*/ nullptr,
+                                               /*reservation=*/ nullptr,
                                                &error_msg));
     EXPECT_TRUE(oat != nullptr) << error_msg;
     return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 52d355b..7382a97 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -24,7 +24,6 @@
 
 #include <inttypes.h>
 #include <stdio.h>
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 
 #include <iostream>
 #include <memory>
@@ -34,7 +33,9 @@
 #include "android-base/stringprintf.h"
 
 #include "base/logging.h"  // For VLOG_IS_ON.
+#include "base/hiddenapi_flags.h"
 #include "base/mem_map.h"
+#include "base/mman.h"  // For the PROT_* and MAP_* constants.
 #include "base/os.h"
 #include "base/utils.h"
 #include "dex/art_dex_file_loader.h"
@@ -222,6 +223,14 @@
   return str;
 }
 
+static std::string GetHiddenapiFlagStr(uint32_t hiddenapi_flags) {
+  std::stringstream ss;
+  hiddenapi::ApiList(hiddenapi_flags).Dump(ss);
+  std::string api_list = ss.str();
+  std::transform(api_list.begin(), api_list.end(), api_list.begin(), ::toupper);
+  return api_list;
+}
+
 static std::string GetSignatureForProtoId(const dex_ir::ProtoId* proto) {
   if (proto == nullptr) {
     return "<no signature>";
@@ -247,7 +256,7 @@
  * NULL-terminated.
  */
 static void Asciify(char* out, const unsigned char* data, size_t len) {
-  while (len--) {
+  for (; len != 0u; --len) {
     if (*data < 0x20) {
       // Could do more here, but we don't need them yet.
       switch (*data) {
@@ -1037,26 +1046,6 @@
 }
 
 /*
- * Callback for dumping each positions table entry.
- */
-static bool DumpPositionsCb(void* context, const DexFile::PositionInfo& entry) {
-  FILE* out_file = reinterpret_cast<FILE*>(context);
-  fprintf(out_file, "        0x%04x line=%d\n", entry.address_, entry.line_);
-  return false;
-}
-
-/*
- * Callback for dumping locals table entry.
- */
-static void DumpLocalsCb(void* context, const DexFile::LocalInfo& entry) {
-  const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
-  FILE* out_file = reinterpret_cast<FILE*>(context);
-  fprintf(out_file, "        0x%04x - 0x%04x reg=%d %s %s %s\n",
-          entry.start_address_, entry.end_address_, entry.reg_,
-          entry.name_, entry.descriptor_, signature);
-}
-
-/*
  * Lookup functions.
  */
 static const char* StringDataByIdx(uint32_t idx, dex_ir::Header* header) {
@@ -1112,8 +1101,13 @@
                                      [this](uint32_t idx) {
                                        return StringDataByIdx(idx, this->header_);
                                      },
-                                     DumpPositionsCb,
-                                     out_file_);
+                                     [&](const DexFile::PositionInfo& entry) {
+                                       fprintf(out_file_,
+                                               "        0x%04x line=%d\n",
+                                               entry.address_,
+                                               entry.line_);
+                                        return false;
+                                     });
   }
   fprintf(out_file_, "      locals        : \n");
   if (debug_info != nullptr) {
@@ -1144,15 +1138,29 @@
                                         StringDataByTypeIdx(dchecked_integral_cast<uint16_t>(idx),
                                                             this->header_);
                                   },
-                                  DumpLocalsCb,
-                                  out_file_);
+                                  [&](const DexFile::LocalInfo& entry) {
+                                    const char* signature =
+                                        entry.signature_ != nullptr ? entry.signature_ : "";
+                                    fprintf(out_file_,
+                                            "        0x%04x - 0x%04x reg=%d %s %s %s\n",
+                                            entry.start_address_,
+                                            entry.end_address_,
+                                            entry.reg_,
+                                            entry.name_,
+                                            entry.descriptor_,
+                                            signature);
+                                  });
   }
 }
 
 /*
  * Dumps a method.
  */
-void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem* code, int i) {
+void DexLayout::DumpMethod(uint32_t idx,
+                           uint32_t flags,
+                           uint32_t hiddenapi_flags,
+                           const dex_ir::CodeItem* code,
+                           int i) {
   // Bail for anything private if export only requested.
   if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
     return;
@@ -1169,6 +1177,12 @@
     fprintf(out_file_, "      name          : '%s'\n", name);
     fprintf(out_file_, "      type          : '%s'\n", type_descriptor);
     fprintf(out_file_, "      access        : 0x%04x (%s)\n", flags, access_str);
+    if (hiddenapi_flags != 0u) {
+      fprintf(out_file_,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapi_flags,
+              GetHiddenapiFlagStr(hiddenapi_flags).c_str());
+    }
     if (code == nullptr) {
       fprintf(out_file_, "      code          : (none)\n");
     } else {
@@ -1262,7 +1276,11 @@
 /*
  * Dumps a static (class) field.
  */
-void DexLayout::DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedValue* init) {
+void DexLayout::DumpSField(uint32_t idx,
+                           uint32_t flags,
+                           uint32_t hiddenapi_flags,
+                           int i,
+                           dex_ir::EncodedValue* init) {
   // Bail for anything private if export only requested.
   if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
     return;
@@ -1279,6 +1297,12 @@
     fprintf(out_file_, "      name          : '%s'\n", name);
     fprintf(out_file_, "      type          : '%s'\n", type_descriptor);
     fprintf(out_file_, "      access        : 0x%04x (%s)\n", flags, access_str);
+    if (hiddenapi_flags != 0u) {
+      fprintf(out_file_,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapi_flags,
+              GetHiddenapiFlagStr(hiddenapi_flags).c_str());
+    }
     if (init != nullptr) {
       fputs("      value         : ", out_file_);
       DumpEncodedValue(init);
@@ -1309,8 +1333,11 @@
 /*
  * Dumps an instance field.
  */
-void DexLayout::DumpIField(uint32_t idx, uint32_t flags, int i) {
-  DumpSField(idx, flags, i, nullptr);
+void DexLayout::DumpIField(uint32_t idx,
+                           uint32_t flags,
+                           uint32_t hiddenapi_flags,
+                           int i) {
+  DumpSField(idx, flags, hiddenapi_flags, i, nullptr);
 }
 
 /*
@@ -1436,6 +1463,7 @@
       for (uint32_t i = 0; i < static_fields->size(); i++) {
         DumpSField((*static_fields)[i].GetFieldId()->GetIndex(),
                    (*static_fields)[i].GetAccessFlags(),
+                   dex_ir::HiddenapiClassData::GetFlags(header_, class_def, &(*static_fields)[i]),
                    i,
                    i < encoded_values_size ? (*encoded_values)[i].get() : nullptr);
       }  // for
@@ -1452,6 +1480,7 @@
       for (uint32_t i = 0; i < instance_fields->size(); i++) {
         DumpIField((*instance_fields)[i].GetFieldId()->GetIndex(),
                    (*instance_fields)[i].GetAccessFlags(),
+                   dex_ir::HiddenapiClassData::GetFlags(header_, class_def, &(*instance_fields)[i]),
                    i);
       }  // for
     }
@@ -1467,8 +1496,9 @@
       for (uint32_t i = 0; i < direct_methods->size(); i++) {
         DumpMethod((*direct_methods)[i].GetMethodId()->GetIndex(),
                    (*direct_methods)[i].GetAccessFlags(),
+                   dex_ir::HiddenapiClassData::GetFlags(header_, class_def, &(*direct_methods)[i]),
                    (*direct_methods)[i].GetCodeItem(),
-                 i);
+                   i);
       }  // for
     }
   }
@@ -1483,6 +1513,7 @@
       for (uint32_t i = 0; i < virtual_methods->size(); i++) {
         DumpMethod((*virtual_methods)[i].GetMethodId()->GetIndex(),
                    (*virtual_methods)[i].GetAccessFlags(),
+                   dex_ir::HiddenapiClassData::GetFlags(header_, class_def, &(*virtual_methods)[i]),
                    (*virtual_methods)[i].GetCodeItem(),
                    i);
       }  // for
@@ -1559,7 +1590,7 @@
       // Overwrite the existing vector with the new ordering, note that the sets of objects are
       // equivalent, but the order changes. This is why this is not a memory leak.
       // TODO: Consider cleaning this up with a shared_ptr.
-      class_datas[class_data_index].release();
+      class_datas[class_data_index].release();  // NOLINT b/117926937
       class_datas[class_data_index].reset(class_data);
       ++class_data_index;
     }
@@ -1575,7 +1606,7 @@
       // Overwrite the existing vector with the new ordering, note that the sets of objects are
       // equivalent, but the order changes. This is why this is not a memory leak.
       // TODO: Consider cleaning this up with a shared_ptr.
-      class_defs[i].release();
+      class_defs[i].release();  // NOLINT b/117926937
       class_defs[i].reset(new_class_def_order[i]);
     }
   }
@@ -1676,7 +1707,7 @@
   // Now we know what order we want the string data, reorder them.
   size_t data_index = 0;
   for (dex_ir::StringId* string_id : string_ids) {
-    string_datas[data_index].release();
+    string_datas[data_index].release();  // NOLINT b/117926937
     string_datas[data_index].reset(string_id->DataItem());
     ++data_index;
   }
@@ -1919,10 +1950,10 @@
               data_section->Begin(),
               data_section->Size(),
               location,
-              /* checksum */ 0,
-              /*oat_dex_file*/ nullptr,
+              /* location_checksum= */ 0,
+              /*oat_dex_file=*/ nullptr,
               verify,
-              /*verify_checksum*/ false,
+              /*verify_checksum=*/ false,
               error_msg));
       CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << *error_msg;
 
@@ -1933,11 +1964,11 @@
       // Regenerate output IR to catch any bugs that might happen during writing.
       std::unique_ptr<dex_ir::Header> output_header(
           dex_ir::DexIrBuilder(*output_dex_file,
-                               /*eagerly_assign_offsets*/ true,
+                               /*eagerly_assign_offsets=*/ true,
                                GetOptions()));
       std::unique_ptr<dex_ir::Header> orig_header(
           dex_ir::DexIrBuilder(*dex_file,
-                               /*eagerly_assign_offsets*/ true,
+                               /*eagerly_assign_offsets=*/ true,
                                GetOptions()));
       CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), error_msg)) << *error_msg;
     }
@@ -1960,7 +1991,7 @@
   const ArtDexFileLoader dex_file_loader;
   std::vector<std::unique_ptr<const DexFile>> dex_files;
   if (!dex_file_loader.Open(
-        file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
+        file_name, file_name, /* verify= */ true, verify_checksum, &error_msg, &dex_files)) {
     // Display returned error message to user. Note that this error behavior
     // differs from the error messages shown by the original Dalvik dexdump.
     LOG(ERROR) << error_msg;
@@ -1977,7 +2008,7 @@
       if (!ProcessDexFile(file_name,
                           dex_files[i].get(),
                           i,
-                          /*dex_container*/ nullptr,
+                          /*dex_container=*/ nullptr,
                           &error_msg)) {
         LOG(WARNING) << "Failed to run dex file " << i << " in " << file_name << " : " << error_msg;
       }
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 2bca10d..535f789 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -148,7 +148,7 @@
   void DumpEncodedAnnotation(dex_ir::EncodedAnnotation* annotation);
   void DumpEncodedValue(const dex_ir::EncodedValue* data);
   void DumpFileHeader();
-  void DumpIField(uint32_t idx, uint32_t flags, int i);
+  void DumpIField(uint32_t idx, uint32_t flags, uint32_t hiddenapi_flags, int i);
   void DumpInstruction(const dex_ir::CodeItem* code,
                        uint32_t code_offset,
                        uint32_t insn_idx,
@@ -156,9 +156,17 @@
                        const Instruction* dec_insn);
   void DumpInterface(const dex_ir::TypeId* type_item, int i);
   void DumpLocalInfo(const dex_ir::CodeItem* code);
-  void DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem* code, int i);
+  void DumpMethod(uint32_t idx,
+                  uint32_t flags,
+                  uint32_t hiddenapi_flags,
+                  const dex_ir::CodeItem* code,
+                  int i);
   void DumpPositionInfo(const dex_ir::CodeItem* code);
-  void DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedValue* init);
+  void DumpSField(uint32_t idx,
+                  uint32_t flags,
+                  uint32_t hiddenapi_flags,
+                  int i,
+                  dex_ir::EncodedValue* init);
   void DumpDexFile();
 
   void LayoutClassDefsAndClassData(const DexFile* dex_file);
@@ -174,7 +182,7 @@
                      std::string* error_msg);
 
   void DumpCFG(const DexFile* dex_file, int idx);
-  void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code);
+  void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const dex::CodeItem* code);
 
   Options& options_;
   ProfileCompilationInfo* info_;
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 9f73347..2163f89 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -42,11 +42,11 @@
 /*
  * Shows usage.
  */
-static void Usage(void) {
+static void Usage() {
   LOG(ERROR) << "Copyright (C) 2016 The Android Open Source Project\n";
   LOG(ERROR) << kProgramName
              << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
-                " [-s] [-t] [-v] [-w directory] dexfile...\n";
+                " [-s] [-t] [-u] [-v] [-w directory] dexfile...\n";
   LOG(ERROR) << " -a : display annotations";
   LOG(ERROR) << " -b : build dex_ir";
   LOG(ERROR) << " -c : verify checksum and exit";
@@ -85,7 +85,7 @@
   bool want_usage = false;
 
   // Parse all arguments.
-  while (1) {
+  while (true) {
     const int ic = getopt(argc, argv, "abcdefghil:o:p:stuvw:x:");
     if (ic < 0) {
       break;  // done
@@ -190,7 +190,12 @@
   // Open profile file.
   std::unique_ptr<ProfileCompilationInfo> profile_info;
   if (options.profile_file_name_) {
-    int profile_fd = open(options.profile_file_name_, O_RDONLY);
+#ifdef _WIN32
+    int flags = O_RDONLY;
+#else
+    int flags = O_RDONLY | O_CLOEXEC;
+#endif
+    int profile_fd = open(options.profile_file_name_, flags);
     if (profile_fd < 0) {
       PLOG(ERROR) << "Can't open " << options.profile_file_name_;
       return 1;
@@ -201,9 +206,10 @@
       return 1;
     }
   }
+  PLOG(INFO) << "After opening profile file";
 
   // Create DexLayout instance.
-  DexLayout dex_layout(options, profile_info.get(), out_file, /*header*/ nullptr);
+  DexLayout dex_layout(options, profile_info.get(), out_file, /*header=*/ nullptr);
 
   // Process all files supplied on command line.
   int result = 0;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 187c687..b68449e 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -298,7 +298,7 @@
     for (const std::string& dex_file : GetLibCoreDexFileNames()) {
       std::vector<std::string> dexlayout_args =
           { "-w", tmp_dir, "-o", tmp_name, dex_file };
-      if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+      if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
         return false;
       }
       std::string dex_file_name = "classes.dex";
@@ -333,8 +333,8 @@
     const ArtDexFileLoader dex_file_loader;
     bool result = dex_file_loader.Open(input_dex.c_str(),
                                        input_dex,
-                                       /*verify*/ true,
-                                       /*verify_checksum*/ false,
+                                       /*verify=*/ true,
+                                       /*verify_checksum=*/ false,
                                        &error_msg,
                                        &dex_files);
 
@@ -359,7 +359,7 @@
         pfi.AddMethodIndex(static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
                            dex_location,
                            dex_file->GetLocationChecksum(),
-                           /*dex_method_idx*/i,
+                           /*method_idx=*/i,
                            dex_file->NumMethodIds());
       }
       DexCacheResolvedClasses cur_classes(dex_location,
@@ -447,7 +447,7 @@
     // -v makes sure that the layout did not corrupt the dex file.
     std::vector<std::string> dexlayout_args =
         { "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
-    if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+    if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
       return false;
     }
 
@@ -459,7 +459,7 @@
     // -i since the checksum won't match from the first layout.
     std::vector<std::string> second_dexlayout_args =
         { "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, output_dex };
-    if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+    if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
       return false;
     }
 
@@ -493,7 +493,7 @@
     std::string output_dex = tmp_dir + "classes.dex.new";
 
     std::vector<std::string> dexlayout_args = { "-w", tmp_dir, "-o", "/dev/null", input_dex };
-    if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+    if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
       return false;
     }
 
@@ -615,7 +615,7 @@
       { "-a", "-i", "-o", "/dev/null", temp_dex.GetFilename() };
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
                             kDexFileDuplicateOffset,
-                            nullptr /* profile_file */,
+                            /* profile_file= */ nullptr,
                             dexlayout_args));
 }
 
@@ -624,7 +624,7 @@
   std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
                             kNullSetRefListElementInputDex,
-                            nullptr /* profile_file */,
+                            /* profile_file= */ nullptr,
                             dexlayout_args));
 }
 
@@ -666,7 +666,7 @@
   std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
                             kUnknownTypeDebugInfoInputDex,
-                            nullptr /* profile_file */,
+                            /* profile_file= */ nullptr,
                             dexlayout_args));
 }
 
@@ -675,7 +675,7 @@
   std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
                             kDuplicateCodeItemInputDex,
-                            nullptr /* profile_file */,
+                            /* profile_file= */ nullptr,
                             dexlayout_args));
 }
 
@@ -687,7 +687,7 @@
     // Change the dex instructions to make an opcode that spans past the end of the code item.
     for (ClassAccessor accessor : dex->GetClasses()) {
       for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-        DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(method.GetCodeItem());
+        dex::CodeItem* item = const_cast<dex::CodeItem*>(method.GetCodeItem());
         if (item != nullptr) {
           CodeItemInstructionAccessor instructions(*dex, item);
           if (instructions.begin() != instructions.end()) {
@@ -734,7 +734,7 @@
       };
   // -v makes sure that the layout did not corrupt the dex file.
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
-                            /*dex_filename*/ nullptr,
+                            /*dex_filename=*/ nullptr,
                             &profile_file,
                             dexlayout_args));
   ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
@@ -772,7 +772,7 @@
       };
   // -v makes sure that the layout did not corrupt the dex file.
   ASSERT_TRUE(DexLayoutExec(&temp_dex,
-                            /*dex_filename*/ nullptr,
+                            /*dex_filename=*/ nullptr,
                             &profile_file,
                             dexlayout_args));
   ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
@@ -785,29 +785,29 @@
   const std::string input_jar = GetTestDexFileName("ManyMethods");
   CHECK(dex_file_loader.Open(input_jar.c_str(),
                              input_jar.c_str(),
-                             /*verify*/ true,
-                             /*verify_checksum*/ true,
+                             /*verify=*/ true,
+                             /*verify_checksum=*/ true,
                              &error_msg,
                              &dex_files)) << error_msg;
   ASSERT_EQ(dex_files.size(), 1u);
   for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
     EXPECT_GT(dex_file->NumClassDefs(), 1u);
     for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
-      const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+      const dex::ClassDef& class_def = dex_file->GetClassDef(i);
       LOG(INFO) << dex_file->GetClassDescriptor(class_def);
     }
     Options options;
     // Filter out all the classes other than the one below based on class descriptor.
     options.class_filter_.insert("LManyMethods$Strings;");
     DexLayout dexlayout(options,
-                        /*info*/ nullptr,
-                        /*out_file*/ nullptr,
-                        /*header*/ nullptr);
+                        /*info=*/ nullptr,
+                        /*out_file=*/ nullptr,
+                        /*header=*/ nullptr);
     std::unique_ptr<DexContainer> out;
     bool result = dexlayout.ProcessDexFile(
         dex_file->GetLocation().c_str(),
         dex_file.get(),
-        /*dex_file_index*/ 0,
+        /*dex_file_index=*/ 0,
         &out,
         &error_msg);
     ASSERT_TRUE(result) << "Failed to run dexlayout " << error_msg;
@@ -818,17 +818,17 @@
             out->GetDataSection()->Begin(),
             out->GetDataSection()->Size(),
             dex_file->GetLocation().c_str(),
-            /* checksum */ 0,
-            /*oat_dex_file*/ nullptr,
-            /* verify */ true,
-            /*verify_checksum*/ false,
+            /* location_checksum= */ 0,
+            /*oat_dex_file=*/ nullptr,
+            /* verify= */ true,
+            /*verify_checksum=*/ false,
             &error_msg));
     ASSERT_TRUE(output_dex_file != nullptr);
 
     ASSERT_EQ(output_dex_file->NumClassDefs(), options.class_filter_.size());
     for (uint32_t i = 0; i < output_dex_file->NumClassDefs(); ++i) {
       // Check that every class in the output dex file is in the filter.
-      const DexFile::ClassDef& class_def = output_dex_file->GetClassDef(i);
+      const dex::ClassDef& class_def = output_dex_file->GetClassDef(i);
       ASSERT_TRUE(options.class_filter_.find(output_dex_file->GetClassDescriptor(class_def)) !=
           options.class_filter_.end());
     }
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index bd521ac..356791c 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -14,18 +14,34 @@
 
 art_cc_binary {
     name: "dexlist",
+    defaults: ["art_defaults"],
     host_supported: true,
     srcs: ["dexlist.cc"],
-    cflags: ["-Wall", "-Werror"],
     shared_libs: [
         "libdexfile",
         "libartbase",
         "libbase"
     ],
-    // TODO: fix b/72216369 and remove the need for this.
-    include_dirs: [
-        "art/runtime"  // dex utils.
+}
+
+art_cc_binary {
+    name: "dexlists",
+    defaults: [
+        "art_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
     ],
+    host_supported: true,
+    srcs: ["dexlist.cc"],
+    device_supported: false,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+        windows: {
+            enabled: true,
+        },
+    },
 }
 
 art_cc_test {
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index e7eaf30..dd32fae 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -55,9 +55,9 @@
 /*
  * Data types that match the definitions in the VM specification.
  */
-typedef uint8_t  u1;
-typedef uint32_t u4;
-typedef uint64_t u8;
+using u1 = uint8_t;
+using u4 = uint32_t;
+using u8 = uint64_t;
 
 /*
  * Returns a newly-allocated string for the "dot version" of the class
@@ -80,24 +80,11 @@
 }
 
 /*
- * Positions table callback; we just want to catch the number of the
- * first line in the method, which *should* correspond to the first
- * entry from the table.  (Could also use "min" here.)
- */
-static bool positionsCb(void* context, const DexFile::PositionInfo& entry) {
-  int* pFirstLine = reinterpret_cast<int *>(context);
-  if (*pFirstLine == -1) {
-    *pFirstLine = entry.line_;
-  }
-  return 0;
-}
-
-/*
  * Dumps a method.
  */
 static void dumpMethod(const DexFile* pDexFile,
                        const char* fileName, u4 idx, u4 flags ATTRIBUTE_UNUSED,
-                       const DexFile::CodeItem* pCode, u4 codeOffset) {
+                       const dex::CodeItem* pCode, u4 codeOffset) {
   // Abstract and native methods don't get listed.
   if (pCode == nullptr || codeOffset == 0) {
     return;
@@ -105,7 +92,7 @@
   CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
 
   // Method information.
-  const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+  const dex::MethodId& pMethodId = pDexFile->GetMethodId(idx);
   const char* methodName = pDexFile->StringDataByIdx(pMethodId.name_idx_);
   const char* classDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
   std::unique_ptr<char[]> className(descriptorToDot(classDescriptor));
@@ -123,9 +110,13 @@
     fileName = "(none)";
   }
 
-  // Find the first line.
-  int firstLine = -1;
-  pDexFile->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), positionsCb, &firstLine);
+  // We just want to catch the number of the first line in the method, which *should* correspond to
+  // the first entry from the table.
+  int first_line = -1;
+  accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+    first_line = entry.line_;
+    return true;  // Early exit since we only want the first line.
+  });
 
   // Method signature.
   const Signature signature = pDexFile->GetMethodSignature(pMethodId);
@@ -134,7 +125,7 @@
   // Dump actual method information.
   fprintf(gOutFile, "0x%08x %d %s %s %s %s %d\n",
           insnsOff, accessor.InsnsSizeInCodeUnits() * 2,
-          className.get(), methodName, typeDesc, fileName, firstLine);
+          className.get(), methodName, typeDesc, fileName, first_line);
 
   free(typeDesc);
 }
@@ -143,7 +134,7 @@
  * Runs through all direct and virtual methods in the class.
  */
 void dumpClass(const DexFile* pDexFile, u4 idx) {
-  const DexFile::ClassDef& class_def = pDexFile->GetClassDef(idx);
+  const dex::ClassDef& class_def = pDexFile->GetClassDef(idx);
 
   const char* fileName = nullptr;
   if (class_def.source_file_idx_.IsValid()) {
@@ -155,7 +146,7 @@
     dumpMethod(pDexFile,
                fileName,
                method.GetIndex(),
-               method.GetRawAccessFlags(),
+               method.GetAccessFlags(),
                method.GetCodeItem(),
                method.GetCodeItemOffset());
   }
@@ -181,7 +172,7 @@
   if (!dex_file_loader.OpenAll(reinterpret_cast<const uint8_t*>(content.data()),
                                content.size(),
                                fileName,
-                               /*verify*/ true,
+                               /*verify=*/ true,
                                kVerifyChecksum,
                                &error_code,
                                &error_msg,
@@ -206,7 +197,7 @@
 /*
  * Shows usage.
  */
-static void usage(void) {
+static void usage() {
   LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
   LOG(ERROR) << gProgName << ": [-m p.c.m] [-o outfile] dexfile...";
   LOG(ERROR) << "";
@@ -221,7 +212,7 @@
   memset(&gOptions, 0, sizeof(gOptions));
 
   // Parse all arguments.
-  while (1) {
+  while (true) {
     const int ic = getopt(argc, argv, "o:m:");
     if (ic < 0) {
       break;  // done
@@ -266,7 +257,7 @@
 
   // Open alternative output file.
   if (gOptions.outputFileName) {
-    gOutFile = fopen(gOptions.outputFileName, "w");
+    gOutFile = fopen(gOptions.outputFileName, "we");
     if (!gOutFile) {
       PLOG(ERROR) << "Can't open " << gOptions.outputFileName;
       free(gOptions.argCopy);
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 68e6713..39e5f8c 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -33,7 +33,7 @@
 
 class DexListTest : public CommonRuntimeTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
     // Dogfood our own lib core dex file.
     dex_file_ = GetLibCoreDexFileNames()[0];
diff --git a/dexoptanalyzer/Android.bp b/dexoptanalyzer/Android.bp
index 99a11cd..72896c8 100644
--- a/dexoptanalyzer/Android.bp
+++ b/dexoptanalyzer/Android.bp
@@ -24,6 +24,7 @@
 
     target: {
         android: {
+            // Use the 32-bit version of dexoptanalyzer on devices.
             compile_multilib: "prefer32",
         },
     },
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 10bb673..92850f7 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -52,6 +52,7 @@
 
 static std::string CommandLine() {
   std::vector<std::string> command;
+  command.reserve(original_argc);
   for (int i = 0; i < original_argc; ++i) {
     command.push_back(original_argv[i]);
   }
@@ -96,6 +97,11 @@
   UsageError("       oat file is up to date. Defaults to $ANDROID_ROOT/framework/boot.art.");
   UsageError("       Example: --image=/system/framework/boot.art");
   UsageError("");
+  UsageError("  --runtime-arg <argument>: used to specify various arguments for the runtime,");
+  UsageError("      such as initial heap size, maximum heap size, and verbose output.");
+  UsageError("      Use a separate --runtime-arg switch for each argument.");
+  UsageError("      Example: --runtime-arg -Xms256m");
+  UsageError("");
   UsageError("  --android-data=<directory>: optional, the directory which should be used as");
   UsageError("       android-data. By default ANDROID_DATA env variable is used.");
   UsageError("");
@@ -167,6 +173,12 @@
         }
       } else if (option.starts_with("--image=")) {
         image_ = option.substr(strlen("--image=")).ToString();
+      } else if (option == "--runtime-arg") {
+        if (i + 1 == argc) {
+          Usage("Missing argument for --runtime-arg\n");
+        }
+        ++i;
+        runtime_args_.push_back(argv[i]);
       } else if (option.starts_with("--android-data=")) {
         // Overwrite android-data if needed (oat file assistant relies on a valid directory to
         // compute dalvik-cache folder). This is mostly used in tests.
@@ -190,11 +202,7 @@
             Usage("Invalid --zip-fd %d", zip_fd_);
           }
       } else if (option.starts_with("--class-loader-context=")) {
-        std::string context_str = option.substr(strlen("--class-loader-context=")).ToString();
-        class_loader_context_ = ClassLoaderContext::Create(context_str);
-        if (class_loader_context_ == nullptr) {
-          Usage("Invalid --class-loader-context '%s'", context_str.c_str());
-        }
+        context_str_ = option.substr(strlen("--class-loader-context=")).ToString();
       } else {
         Usage("Unknown argument '%s'", option.data());
       }
@@ -217,10 +225,14 @@
     RuntimeOptions options;
     // The image could be custom, so make sure we explicitly pass it.
     std::string img = "-Ximage:" + image_;
-    options.push_back(std::make_pair(img.c_str(), nullptr));
+    options.push_back(std::make_pair(img, nullptr));
     // The instruction set of the image should match the instruction set we will test.
     const void* isa_opt = reinterpret_cast<const void*>(GetInstructionSetString(isa_));
     options.push_back(std::make_pair("imageinstructionset", isa_opt));
+    // Explicit runtime args.
+    for (const char* runtime_arg : runtime_args_) {
+      options.push_back(std::make_pair(runtime_arg, nullptr));
+    }
      // Disable libsigchain. We don't don't need it to evaluate DexOptNeeded status.
     options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
     // Pretend we are a compiler so that we can re-use the same infrastructure to load a different
@@ -248,11 +260,22 @@
     }
     std::unique_ptr<Runtime> runtime(Runtime::Current());
 
+    // Only when the runtime is created can we create the class loader context: the
+    // class loader context will open dex files and use the MemMap global lock that the
+    // runtime owns.
+    std::unique_ptr<ClassLoaderContext> class_loader_context;
+    if (!context_str_.empty()) {
+      class_loader_context = ClassLoaderContext::Create(context_str_);
+      if (class_loader_context == nullptr) {
+        Usage("Invalid --class-loader-context '%s'", context_str_.c_str());
+      }
+    }
+
     std::unique_ptr<OatFileAssistant> oat_file_assistant;
     oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
                                                             isa_,
-                                                            false /*load_executable*/,
-                                                            false /*only_load_system_executable*/,
+                                                            /*load_executable=*/ false,
+                                                            /*only_load_system_executable=*/ false,
                                                             vdex_fd_,
                                                             oat_fd_,
                                                             zip_fd_);
@@ -263,7 +286,7 @@
     }
 
     int dexoptNeeded = oat_file_assistant->GetDexOptNeeded(
-        compiler_filter_, assume_profile_changed_, downgrade_, class_loader_context_.get());
+        compiler_filter_, assume_profile_changed_, downgrade_, class_loader_context.get());
 
     // Convert OatFileAssitant codes to dexoptanalyzer codes.
     switch (dexoptNeeded) {
@@ -284,10 +307,11 @@
   std::string dex_file_;
   InstructionSet isa_;
   CompilerFilter::Filter compiler_filter_;
-  std::unique_ptr<ClassLoaderContext> class_loader_context_;
+  std::string context_str_;
   bool assume_profile_changed_;
   bool downgrade_;
   std::string image_;
+  std::vector<const char*> runtime_args_;
   int oat_fd_ = -1;
   int vdex_fd_ = -1;
   // File descriptor corresponding to apk, dex_file, or zip.
diff --git a/dexoptanalyzer/dexoptanalyzer_test.cc b/dexoptanalyzer/dexoptanalyzer_test.cc
index 93ebf2b..7b6b36c 100644
--- a/dexoptanalyzer/dexoptanalyzer_test.cc
+++ b/dexoptanalyzer/dexoptanalyzer_test.cc
@@ -36,7 +36,8 @@
 
   int Analyze(const std::string& dex_file,
               CompilerFilter::Filter compiler_filter,
-              bool assume_profile_changed) {
+              bool assume_profile_changed,
+              const std::string& class_loader_context) {
     std::string dexoptanalyzer_cmd = GetDexoptAnalyzerCmd();
     std::vector<std::string> argv_str;
     argv_str.push_back(dexoptanalyzer_cmd);
@@ -46,8 +47,15 @@
     if (assume_profile_changed) {
       argv_str.push_back("--assume-profile-changed");
     }
+    argv_str.push_back("--runtime-arg");
+    argv_str.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+    argv_str.push_back("--runtime-arg");
+    argv_str.push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
     argv_str.push_back("--image=" + GetImageLocation());
     argv_str.push_back("--android-data=" + android_data_);
+    if (!class_loader_context.empty()) {
+      argv_str.push_back("--class-loader-context=" + class_loader_context);
+    }
 
     std::string error;
     return ExecAndReturnCode(argv_str, &error);
@@ -70,10 +78,12 @@
   void Verify(const std::string& dex_file,
               CompilerFilter::Filter compiler_filter,
               bool assume_profile_changed = false,
-              bool downgrade = false) {
-    int dexoptanalyzerResult = Analyze(dex_file, compiler_filter, assume_profile_changed);
+              bool downgrade = false,
+              const std::string& class_loader_context = "") {
+    int dexoptanalyzerResult = Analyze(
+        dex_file, compiler_filter, assume_profile_changed, class_loader_context);
     dexoptanalyzerResult = DexoptanalyzerToOatFileAssistant(dexoptanalyzerResult);
-    OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable*/ false);
+    OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable=*/ false);
     int assistantResult = oat_file_assistant.GetDexOptNeeded(
         compiler_filter, assume_profile_changed, downgrade);
     EXPECT_EQ(assistantResult, dexoptanalyzerResult);
@@ -175,7 +185,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /*with_alternate_image*/true);
+                     /*with_alternate_image=*/true);
 
   Verify(dex_location, CompilerFilter::kExtract);
   Verify(dex_location, CompilerFilter::kQuicken);
@@ -192,7 +202,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kExtract,
-                     /*with_alternate_image*/true);
+                     /*with_alternate_image=*/true);
 
   Verify(dex_location, CompilerFilter::kExtract);
   Verify(dex_location, CompilerFilter::kQuicken);
@@ -208,6 +218,7 @@
 
   Verify(dex_location, CompilerFilter::kExtract);
   Verify(dex_location, CompilerFilter::kSpeed);
+  Verify(dex_location, CompilerFilter::kEverything);
 }
 
 // Case: We have a stripped DEX file and a PIC ODEX file, but no OAT file.
@@ -300,4 +311,22 @@
   Verify(dex_location, CompilerFilter::kSpeed);
 }
 
+// Case: We have a DEX file and up-to-date OAT file for it, and we check with
+// a class loader context.
+TEST_F(DexoptAnalyzerTest, ClassLoaderContext) {
+  std::string dex_location1 = GetScratchDir() + "/DexToAnalyze.jar";
+  std::string odex_location1 = GetOdexDir() + "/DexToAnalyze.odex";
+  std::string dex_location2 = GetScratchDir() + "/DexInContext.jar";
+  Copy(GetDexSrc1(), dex_location1);
+  Copy(GetDexSrc2(), dex_location2);
+
+  std::string class_loader_context = "PCL[" + dex_location2 + "]";
+  std::string class_loader_context_option = "--class-loader-context=PCL[" + dex_location2 + "]";
+
+  // Generate the odex so that creating the class loader context also opens the dex files.
+  GenerateOdexForTest(dex_location1, odex_location1, CompilerFilter::kSpeed, /* compilation_reason= */ nullptr, /* extra_args= */ { class_loader_context_option });
+
+  Verify(dex_location1, CompilerFilter::kSpeed, false, false, class_loader_context);
+}
+
 }  // namespace art
diff --git a/disassembler/Android.bp b/disassembler/Android.bp
index 241b191..5aa159e 100644
--- a/disassembler/Android.bp
+++ b/disassembler/Android.bp
@@ -20,11 +20,39 @@
     host_supported: true,
     srcs: [
         "disassembler.cc",
-        "disassembler_arm.cc",
-        "disassembler_arm64.cc",
         "disassembler_mips.cc",
         "disassembler_x86.cc",
     ],
+    codegen: {
+        arm: {
+            srcs: ["disassembler_arm.cc"]
+        },
+        arm64: {
+            srcs: ["disassembler_arm64.cc"]
+        },
+        // TODO: We should also conditionally include the MIPS32/MIPS64 and the
+        // x86/x86-64 disassembler definitions (b/119090273). However, using the
+        // following syntax here:
+        //
+        //   mips: {
+        //       srcs: ["disassembler_mips.cc"]
+        //   },
+        //   mips64: {
+        //       srcs: ["disassembler_mips.cc"]
+        //   },
+        //   x86: {
+        //       srcs: ["disassembler_x86.cc"]
+        //   },
+        //   x86_64: {
+        //       srcs: ["disassembler_x86.cc"]
+        //   },
+        //
+        // does not work, as it generates a file rejected by ninja with this
+        // error message (e.g. on host, where we include all the back ends by
+        // default):
+        //
+        //   FAILED: ninja: out/soong/build.ninja:320768: multiple rules generate out/soong/.intermediates/art/disassembler/libart-disassembler/linux_glibc_x86_64_static/obj/art/disassembler/disassembler_mips.o [-w dupbuild=err]
+    },
     include_dirs: ["art/runtime"],
 
     shared_libs: [
@@ -41,8 +69,7 @@
     defaults: ["libart-disassembler-defaults"],
     shared_libs: [
         // For disassembler_arm*.
-        "libvixl-arm",
-        "libvixl-arm64",
+        "libvixl",
     ],
 }
 
@@ -54,7 +81,6 @@
     ],
     shared_libs: [
         // For disassembler_arm*.
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
     ],
 }
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 2ed41c8..0662334 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -21,10 +21,21 @@
 #include "android-base/logging.h"
 #include "android-base/stringprintf.h"
 
-#include "disassembler_arm.h"
-#include "disassembler_arm64.h"
-#include "disassembler_mips.h"
-#include "disassembler_x86.h"
+#ifdef ART_ENABLE_CODEGEN_arm
+# include "disassembler_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+# include "disassembler_arm64.h"
+#endif
+
+#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
+# include "disassembler_mips.h"
+#endif
+
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+# include "disassembler_x86.h"
+#endif
 
 using android::base::StringPrintf;
 
@@ -36,21 +47,35 @@
 }
 
 Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
-  if (instruction_set == InstructionSet::kArm || instruction_set == InstructionSet::kThumb2) {
-    return new arm::DisassemblerArm(options);
-  } else if (instruction_set == InstructionSet::kArm64) {
-    return new arm64::DisassemblerArm64(options);
-  } else if (instruction_set == InstructionSet::kMips) {
-    return new mips::DisassemblerMips(options, /* is_o32_abi */ true);
-  } else if (instruction_set == InstructionSet::kMips64) {
-    return new mips::DisassemblerMips(options, /* is_o32_abi */ false);
-  } else if (instruction_set == InstructionSet::kX86) {
-    return new x86::DisassemblerX86(options, false);
-  } else if (instruction_set == InstructionSet::kX86_64) {
-    return new x86::DisassemblerX86(options, true);
-  } else {
-    UNIMPLEMENTED(FATAL) << static_cast<uint32_t>(instruction_set);
-    return nullptr;
+  switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+      return new arm::DisassemblerArm(options);
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+    case InstructionSet::kArm64:
+      return new arm64::DisassemblerArm64(options);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+    case InstructionSet::kMips:
+      return new mips::DisassemblerMips(options, /* is_o32_abi= */ true);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+    case InstructionSet::kMips64:
+      return new mips::DisassemblerMips(options, /* is_o32_abi= */ false);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case InstructionSet::kX86:
+      return new x86::DisassemblerX86(options, /* supports_rex= */ false);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+    case InstructionSet::kX86_64:
+      return new x86::DisassemblerX86(options, /* supports_rex= */ true);
+#endif
+    default:
+      UNIMPLEMENTED(FATAL) << static_cast<uint32_t>(instruction_set);
+      UNREACHABLE();
   }
 }
 
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index c1a6f59..94ea006 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -137,12 +137,12 @@
 void DisassemblerArm::CustomDisassembler::CustomDisassemblerStream::PrintLiteral(LocationType type,
                                                                                  int32_t offset) {
   // Literal offsets are not required to be aligned, so we may need unaligned access.
-  typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1)));
-  typedef const uint16_t unaligned_uint16_t __attribute__ ((aligned (1)));
-  typedef const int32_t unaligned_int32_t __attribute__ ((aligned (1)));
-  typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1)));
-  typedef const float unaligned_float __attribute__ ((aligned (1)));
-  typedef const double unaligned_double __attribute__ ((aligned (1)));
+  using unaligned_int16_t  __attribute__((__aligned__(1))) = const int16_t;
+  using unaligned_uint16_t __attribute__((__aligned__(1))) = const uint16_t;
+  using unaligned_int32_t  __attribute__((__aligned__(1))) = const int32_t;
+  using unaligned_int64_t  __attribute__((__aligned__(1))) = const int64_t;
+  using unaligned_float    __attribute__((__aligned__(1))) = const float;
+  using unaligned_double   __attribute__((__aligned__(1))) = const double;
 
   // Zeros are used for the LocationType values this function does not care about.
   const size_t literal_size[kVst4Location + 1] = {
diff --git a/dt_fd_forward/Android.bp b/dt_fd_forward/Android.bp
index 1ba2323..2a2aa18 100644
--- a/dt_fd_forward/Android.bp
+++ b/dt_fd_forward/Android.bp
@@ -41,6 +41,7 @@
     header_libs: [
         "javavm_headers",
         "dt_fd_forward_export",
+        "art_libartbase_headers",  // For strlcpy emulation.
     ],
     multilib: {
         lib32: {
diff --git a/dt_fd_forward/dt_fd_forward.cc b/dt_fd_forward/dt_fd_forward.cc
index 116cdf8..d5b6de5 100644
--- a/dt_fd_forward/dt_fd_forward.cc
+++ b/dt_fd_forward/dt_fd_forward.cc
@@ -50,6 +50,8 @@
 #include <jni.h>
 #include <jdwpTransport.h>
 
+#include <base/strlcpy.h>
+
 namespace dt_fd_forward {
 
 // Helper that puts line-number in error message.
@@ -105,12 +107,21 @@
   TEMP_FAILURE_RETRY(send(fd, kListenStartMessage, sizeof(kListenStartMessage), MSG_EOR));
 }
 
+// Copy from file_utils, so we do not need to depend on libartbase.
+static int DupCloexec(int fd) {
+#if defined(__linux__)
+  return fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
+  return dup(fd);
+#endif
+}
+
 jdwpTransportError FdForwardTransport::SetupListen(int listen_fd) {
   std::lock_guard<std::mutex> lk(state_mutex_);
   if (!ChangeState(TransportState::kClosed, TransportState::kListenSetup)) {
     return ERR(ILLEGAL_STATE);
   } else {
-    listen_fd_.reset(dup(listen_fd));
+    listen_fd_.reset(DupCloexec(listen_fd));
     SendListenMessage(listen_fd_);
     CHECK(ChangeState(TransportState::kListenSetup, TransportState::kListening));
     return OK;
@@ -339,7 +350,7 @@
   write_lock_fd_.reset(out_fds.write_lock_fd_);
 
   // We got the fds. Send ack.
-  close_notify_fd_.reset(dup(listen_fd_));
+  close_notify_fd_.reset(DupCloexec(listen_fd_));
   SendAcceptMessage(close_notify_fd_);
 
   return IOResult::kOk;
@@ -642,7 +653,7 @@
 jdwpTransportError FdForwardTransport::GetLastError(/*out*/char** err) {
   std::string data = global_last_error_;
   *err = reinterpret_cast<char*>(Alloc(data.size() + 1));
-  strcpy(*err, data.c_str());
+  strlcpy(*err, data.c_str(), data.size() + 1);
   return OK;
 }
 
diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp
index 972c8f7..39720a0 100644
--- a/imgdiag/Android.bp
+++ b/imgdiag/Android.bp
@@ -31,9 +31,6 @@
         "libbase",
     ],
     target: {
-        android: {
-            shared_libs: ["libcutils"],
-        },
         host: {
             shared_libs: ["libziparchive"],
         },
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index ebc18fc..a1edd00 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -26,6 +26,7 @@
 #include <unordered_set>
 #include <vector>
 
+#include <android-base/parseint.h>
 #include "android-base/stringprintf.h"
 
 #include "art_field-inl.h"
@@ -35,7 +36,7 @@
 #include "class_linker.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
-#include "image.h"
+#include "image-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "oat.h"
@@ -761,7 +762,8 @@
 
     std::unordered_set<size_t> dirty_members;
     // Examine the members comprising the ArtMethod, computing which members are dirty.
-    for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
+    for (const std::pair<const size_t,
+                         MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
       const size_t offset = p.first;
       if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) {
         dirty_members.insert(p.first);
@@ -787,7 +789,8 @@
   void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
     DumpSamplesAndOffsetCount();
     os_ << "      offset to field map:\n";
-    for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
+    for (const std::pair<const size_t,
+                         MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
       const size_t offset = p.first;
       const size_t size = p.second.size_;
       os_ << StringPrintf("        %zu-%zu: ", offset, offset + size - 1)
@@ -1004,7 +1007,7 @@
                      begin_image_ptr,
                      RegionCommon<T>::remote_contents_,
                      base_ptr,
-                     /*log_dirty_objects*/true);
+                     /*log_dirty_objects=*/true);
     // Print shared dirty after since it's less important.
     if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
       // We only reach this point if both pids were specified.  Furthermore,
@@ -1016,7 +1019,7 @@
                        begin_image_ptr,
                        RegionCommon<T>::zygote_contents_,
                        begin_image_ptr,
-                       /*log_dirty_objects*/false);
+                       /*log_dirty_objects=*/false);
     }
     RegionSpecializedBase<T>::DumpDirtyObjects();
     RegionSpecializedBase<T>::DumpDirtyEntries();
@@ -1682,14 +1685,14 @@
     if (option.starts_with("--image-diff-pid=")) {
       const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
 
-      if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
+      if (!android::base::ParseInt(image_diff_pid, &image_diff_pid_)) {
         *error_msg = "Image diff pid out of range";
         return kParseError;
       }
     } else if (option.starts_with("--zygote-diff-pid=")) {
       const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
 
-      if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
+      if (!android::base::ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
         *error_msg = "Zygote diff pid out of range";
         return kParseError;
       }
@@ -1730,7 +1733,7 @@
     return kParseOk;
   }
 
-  virtual std::string GetUsage() const {
+  std::string GetUsage() const override {
     std::string usage;
 
     usage +=
@@ -1760,7 +1763,7 @@
 };
 
 struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
-  virtual bool ExecuteWithRuntime(Runtime* runtime) {
+  bool ExecuteWithRuntime(Runtime* runtime) override {
     CHECK(args_ != nullptr);
 
     return DumpImage(runtime,
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index cb40c7d..739d9b8 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -34,12 +34,8 @@
 
 namespace art {
 
-static const char* kImgDiagDiffPid = "--image-diff-pid";
-static const char* kImgDiagBootImage = "--boot-image";
 static const char* kImgDiagBinaryName = "imgdiag";
 
-static const char* kImgDiagZygoteDiffPid = "--zygote-diff-pid";
-
 // from kernel <include/linux/threads.h>
 #define PID_MAX_LIMIT (4*1024*1024)  // Upper bound. Most kernel configs will have smaller max pid.
 
@@ -47,7 +43,7 @@
 
 class ImgDiagTest : public CommonRuntimeTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
 
     // We loaded the runtime with an explicit image. Therefore the image space must exist.
@@ -93,25 +89,15 @@
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
 
     // Run imgdiag --image-diff-pid=$image_diff_pid and wait until it's done with a 0 exit code.
-    std::string diff_pid_args;
-    std::string zygote_diff_pid_args;
-    {
-      std::stringstream diff_pid_args_ss;
-      diff_pid_args_ss << kImgDiagDiffPid << "=" << image_diff_pid;
-      diff_pid_args = diff_pid_args_ss.str();
-    }
-    {
-      std::stringstream zygote_pid_args_ss;
-      zygote_pid_args_ss << kImgDiagZygoteDiffPid << "=" << image_diff_pid;
-      zygote_diff_pid_args = zygote_pid_args_ss.str();
-    }
-    std::string boot_image_args = std::string(kImgDiagBootImage) + "=" + boot_image;
-
     std::vector<std::string> exec_argv = {
         file_path,
-        diff_pid_args,
-        zygote_diff_pid_args,
-        boot_image_args
+        "--image-diff-pid=" + std::to_string(image_diff_pid),
+        "--zygote-diff-pid=" + std::to_string(image_diff_pid),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()),
+        "--boot-image=" + boot_image
     };
 
     return ::art::Exec(exec_argv, error_msg);
diff --git a/libart_fake/Android.mk b/libart_fake/Android.mk
deleted file mode 100644
index 96e6a14..0000000
--- a/libart_fake/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := libart_fake
-LOCAL_INSTALLED_MODULE_STEM := libart.so
-LOCAL_SDK_VERSION := 9
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_SRC_FILES := fake.cc
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := liblog
-
-ifdef TARGET_2ND_ARCH
-    LOCAL_MODULE_PATH_32 := $(TARGET_OUT)/fake-libs
-    LOCAL_MODULE_PATH_64 := $(TARGET_OUT)/fake-libs64
-else
-    LOCAL_MODULE_PATH := $(TARGET_OUT)/fake-libs
-endif
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/libart_fake/README.md b/libart_fake/README.md
deleted file mode 100644
index 6e3621e..0000000
--- a/libart_fake/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-libart_fake
-====
-
-A fake libart made to satisfy some misbehaving apps that will attempt to link
-against libart.so.
diff --git a/libart_fake/fake.cc b/libart_fake/fake.cc
deleted file mode 100644
index 8842421..0000000
--- a/libart_fake/fake.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "libart_fake"
-
-#include <android/log.h>
-
-#define LOGIT(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
-namespace art {
-class Dbg {
- public:
-  void SuspendVM();
-  void ResumeVM();
-};
-
-class FaultManager {
- public:
-  void EnsureArtActionInFrontOfSignalChain();
-};
-
-void Dbg::SuspendVM() {
-  LOGIT("Linking to and calling into libart.so internal functions is not supported. "
-        "This call to '%s' is being ignored.", __func__);
-}
-void Dbg::ResumeVM() {
-  LOGIT("Linking to and calling into libart.so internal functions is not supported. "
-        "This call to '%s' is being ignored.", __func__);
-}
-void FaultManager::EnsureArtActionInFrontOfSignalChain() {
-  LOGIT("Linking to and calling into libart.so internal functions is not supported. "
-        "This call to '%s' is being ignored.", __func__);
-}
-};  // namespace art
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 4ee48da..1ca7011 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -24,20 +24,24 @@
         "base/arena_allocator.cc",
         "base/arena_bit_vector.cc",
         "base/bit_vector.cc",
+        "base/enums.cc",
         "base/file_magic.cc",
         "base/file_utils.cc",
         "base/hex_dump.cc",
+        "base/hiddenapi_flags.cc",
         "base/logging.cc",
         "base/malloc_arena_pool.cc",
+        "base/membarrier.cc",
+        "base/memfd.cc",
         "base/memory_region.cc",
         "base/mem_map.cc",
         // "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
-        "base/mem_map_unix.cc",
         "base/os_linux.cc",
         "base/runtime_debug.cc",
         "base/safe_copy.cc",
         "base/scoped_arena_allocator.cc",
         "base/scoped_flock.cc",
+        "base/socket_peer_is_trusted.cc",
         "base/time_utils.cc",
         "base/unix_file/fd_file.cc",
         "base/unix_file/random_access_file_utils.cc",
@@ -46,37 +50,101 @@
     ],
     target: {
         android: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
             static_libs: [
                 // ZipArchive support, the order matters here to get all symbols.
                 "libziparchive",
                 "libz",
             ],
-	    shared_libs: [
-	        // For android::FileMap used by libziparchive.
-                "libutils",
-	    ],
+            shared_libs: [
+                "liblog",
+                // For ashmem.
+                "libartpalette",
+                // For common macros.
+                "libbase",
+            ],
+            export_shared_lib_headers: ["libbase"],
+            // Exclude the version script from Darwin host since it's not
+            // supported by the linker there. That means ASan checks on Darwin
+            // might trigger ODR violations.
+            version_script: "libartbase.map",
         },
-        host: {
+        not_windows: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
             shared_libs: [
                 "libziparchive",
                 "libz",
+                "liblog",
+                // For ashmem.
+                "libartpalette",
+                // For common macros.
+                "libbase",
             ],
+            export_shared_lib_headers: ["libbase"],
+        },
+        linux_glibc: {
+            version_script: "libartbase.map",
+        },
+        windows: {
+            version_script: "libartbase.map",
+            srcs: [
+                "base/mem_map_windows.cc",
+            ],
+            static_libs: [
+                "libziparchive",
+                "libz",
+                "liblog",
+                // For ashmem.
+                "libartpalette",
+                // For common macros.
+                "libbase",
+            ],
+            export_static_lib_headers: ["libbase"],
+            cflags: ["-Wno-thread-safety"],
         },
     },
     generated_sources: ["art_libartbase_operator_srcs"],
     cflags: ["-DBUILDING_LIBART=1"],
-    shared_libs: [
-        "liblog",
-	// For ashmem.
-        "libcutils",
-        // For common macros.
-        "libbase",
+
+    // Utilities used by various ART libs and tools are linked in statically
+    // here to avoid shared lib dependencies outside the ART APEX. No target
+    // there should depend on these separately.
+    whole_static_libs: [
+        "liblz4",
+        "liblzma",
     ],
+
     export_include_dirs: ["."],
     // ART's macros.h depends on libbase's macros.h.
     // Note: runtime_options.h depends on cmdline. But we don't really want to export this
     //       generically. dex2oat takes care of it itself.
-    export_shared_lib_headers: ["libbase"],
+}
+
+cc_defaults {
+    name: "libartbase_static_base_defaults",
+    static_libs: [
+        "libbase",
+        "libartpalette",
+        "liblog",
+        "libz",
+        "libziparchive",
+    ],
+}
+
+cc_defaults {
+    name: "libartbase_static_defaults",
+    defaults: ["libartbase_static_base_defaults"],
+    static_libs: ["libartbase"],
+}
+
+cc_defaults {
+    name: "libartbased_static_defaults",
+    defaults: ["libartbase_static_base_defaults"],
+    static_libs: ["libartbased"],
 }
 
 gensrcs {
@@ -86,7 +154,6 @@
     srcs: [
         "arch/instruction_set.h",
         "base/allocator.h",
-        "base/callee_save_type.h",
         "base/unix_file/fd_file.h",
     ],
     output_extension: "operator_out.cc",
@@ -105,6 +172,14 @@
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -118,6 +193,14 @@
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -135,9 +218,6 @@
     header_libs: [
         "libnativehelper_header_only",
     ],
-    include_dirs: [
-        "external/icu/icu4c/source/common",
-    ],
 }
 
 art_cc_test {
@@ -162,6 +242,8 @@
         "base/indenter_test.cc",
         "base/leb128_test.cc",
         "base/logging_test.cc",
+        "base/memfd_test.cc",
+        "base/membarrier_test.cc",
         "base/memory_region_test.cc",
         "base/mem_map_test.cc",
         "base/safe_copy_test.cc",
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index a187663..8d4fbf4 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -105,18 +105,7 @@
   UNREACHABLE();
 }
 
-#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
-    !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
-    !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
-#error "Missing defines for stack overflow gap"
-#endif
-
-static constexpr size_t kArmStackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_arm;
-static constexpr size_t kArm64StackOverflowReservedBytes  = ART_STACK_OVERFLOW_GAP_arm64;
-static constexpr size_t kMipsStackOverflowReservedBytes   = ART_STACK_OVERFLOW_GAP_mips;
-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
-static constexpr size_t kX86StackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_x86;
-static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
+namespace instruction_set_details {
 
 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
@@ -144,33 +133,11 @@
 static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
               "Frame size limit too large");
 
-size_t GetStackOverflowReservedBytes(InstructionSet isa) {
-  switch (isa) {
-    case InstructionSet::kArm:      // Intentional fall-through.
-    case InstructionSet::kThumb2:
-      return kArmStackOverflowReservedBytes;
-
-    case InstructionSet::kArm64:
-      return kArm64StackOverflowReservedBytes;
-
-    case InstructionSet::kMips:
-      return kMipsStackOverflowReservedBytes;
-
-    case InstructionSet::kMips64:
-      return kMips64StackOverflowReservedBytes;
-
-    case InstructionSet::kX86:
-      return kX86StackOverflowReservedBytes;
-
-    case InstructionSet::kX86_64:
-      return kX86_64StackOverflowReservedBytes;
-
-    case InstructionSet::kNone:
-      LOG(FATAL) << "kNone has no stack overflow size";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Unknown instruction set" << isa;
+NO_RETURN void GetStackOverflowReservedBytesFailure(const char* error_msg) {
+  LOG(FATAL) << error_msg;
   UNREACHABLE();
 }
 
+}  // namespace instruction_set_details
+
 }  // namespace art
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 06bd53a..7e071bd 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -226,7 +226,53 @@
   InstructionSetAbort(isa);
 }
 
-size_t GetStackOverflowReservedBytes(InstructionSet isa);
+namespace instruction_set_details {
+
+#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
+    !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
+    !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
+#error "Missing defines for stack overflow gap"
+#endif
+
+static constexpr size_t kArmStackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_arm;
+static constexpr size_t kArm64StackOverflowReservedBytes  = ART_STACK_OVERFLOW_GAP_arm64;
+static constexpr size_t kMipsStackOverflowReservedBytes   = ART_STACK_OVERFLOW_GAP_mips;
+static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
+static constexpr size_t kX86StackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_x86;
+static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
+
+NO_RETURN void GetStackOverflowReservedBytesFailure(const char* error_msg);
+
+}  // namespace instruction_set_details
+
+ALWAYS_INLINE
+constexpr size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+  switch (isa) {
+    case InstructionSet::kArm:      // Intentional fall-through.
+    case InstructionSet::kThumb2:
+      return instruction_set_details::kArmStackOverflowReservedBytes;
+
+    case InstructionSet::kArm64:
+      return instruction_set_details::kArm64StackOverflowReservedBytes;
+
+    case InstructionSet::kMips:
+      return instruction_set_details::kMipsStackOverflowReservedBytes;
+
+    case InstructionSet::kMips64:
+      return instruction_set_details::kMips64StackOverflowReservedBytes;
+
+    case InstructionSet::kX86:
+      return instruction_set_details::kX86StackOverflowReservedBytes;
+
+    case InstructionSet::kX86_64:
+      return instruction_set_details::kX86_64StackOverflowReservedBytes;
+
+    case InstructionSet::kNone:
+      instruction_set_details::GetStackOverflowReservedBytesFailure(
+          "kNone has no stack overflow size");
+  }
+  instruction_set_details::GetStackOverflowReservedBytesFailure("Unknown instruction set");
+}
 
 // The following definitions create return types for two word-sized entities that will be passed
 // in registers so that memory operations for the interface trampolines can be avoided. The entities
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index 1bcfe87..6393672 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -30,11 +30,11 @@
   MallocAllocator() {}
   ~MallocAllocator() {}
 
-  void* Alloc(size_t size) {
+  void* Alloc(size_t size) override {
     return calloc(sizeof(uint8_t), size);
   }
 
-  void Free(void* p) {
+  void Free(void* p) override {
     free(p);
   }
 
@@ -49,12 +49,12 @@
   NoopAllocator() {}
   ~NoopAllocator() {}
 
-  void* Alloc(size_t size ATTRIBUTE_UNUSED) {
+  void* Alloc(size_t size ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "NoopAllocator::Alloc should not be called";
     UNREACHABLE();
   }
 
-  void Free(void* p ATTRIBUTE_UNUSED) {
+  void Free(void* p ATTRIBUTE_UNUSED) override {
     // Noop.
   }
 
diff --git a/libartbase/base/arena_allocator.cc b/libartbase/base/arena_allocator.cc
index df3deba..0e7f6cc 100644
--- a/libartbase/base/arena_allocator.cc
+++ b/libartbase/base/arena_allocator.cc
@@ -16,7 +16,6 @@
 
 #include "arena_allocator-inl.h"
 
-#include <sys/mman.h>
 
 #include <algorithm>
 #include <cstddef>
@@ -25,6 +24,8 @@
 
 #include <android-base/logging.h>
 
+#include "mman.h"
+
 namespace art {
 
 constexpr size_t kMemoryToolRedZoneBytes = 8;
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index c6d8993..138a5df 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -62,11 +62,11 @@
     UNREACHABLE();
   }
 
-  virtual void* Alloc(size_t size) {
+  void* Alloc(size_t size) override {
     return allocator_->Alloc(size, this->Kind());
   }
 
-  virtual void Free(void*) {}  // Nop.
+  void Free(void*) override {}  // Nop.
 
  private:
   ArenaBitVectorAllocator(ArenaAlloc* allocator, ArenaAllocKind kind)
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 76f57da..1f1011e 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -85,15 +85,15 @@
 
   // Load a single bit in the region. The bit at offset 0 is the least
   // significant bit in the first byte.
-  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
-  ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
+  ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
     DCHECK_LT(bit_offset, bit_size_);
-    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
-    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
-    return ((data_[index] >> shift) & 1) != 0;
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    return ((data[index] >> shift) & 1) != 0;
   }
 
-  ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) {
+  ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
     DCHECK_LT(bit_offset, bit_size_);
     uint8_t* data = reinterpret_cast<uint8_t*>(data_);
     size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
diff --git a/libartbase/base/bit_string_test.cc b/libartbase/base/bit_string_test.cc
index 89a71a1..45f4d4e 100644
--- a/libartbase/base/bit_string_test.cc
+++ b/libartbase/base/bit_string_test.cc
@@ -110,17 +110,17 @@
   ASSERT_EQ(BitString::kCapacity, 3u);
 
   EXPECT_BITSTRING_STR("BitString[]", bs);
-  bs = SetBitStringCharAt(bs, /*i*/0, /*val*/1u);
+  bs = SetBitStringCharAt(bs, /*i=*/0, /*val=*/1u);
   EXPECT_BITSTRING_STR("BitString[1]", bs);
-  bs = SetBitStringCharAt(bs, /*i*/1, /*val*/2u);
+  bs = SetBitStringCharAt(bs, /*i=*/1, /*val=*/2u);
   EXPECT_BITSTRING_STR("BitString[1,2]", bs);
-  bs = SetBitStringCharAt(bs, /*i*/2, /*val*/3u);
+  bs = SetBitStringCharAt(bs, /*i=*/2, /*val=*/3u);
   EXPECT_BITSTRING_STR("BitString[1,2,3]", bs);
 
   // There should be at least "kCapacity" # of checks here, 1 for each unique position.
-  EXPECT_EQ(MakeBitStringChar(/*idx*/0, /*val*/1u), bs[0]);
-  EXPECT_EQ(MakeBitStringChar(/*idx*/1, /*val*/2u), bs[1]);
-  EXPECT_EQ(MakeBitStringChar(/*idx*/2, /*val*/3u), bs[2]);
+  EXPECT_EQ(MakeBitStringChar(/*idx=*/0, /*val=*/1u), bs[0]);
+  EXPECT_EQ(MakeBitStringChar(/*idx=*/1, /*val=*/2u), bs[1]);
+  EXPECT_EQ(MakeBitStringChar(/*idx=*/2, /*val=*/3u), bs[2]);
 
   // Each maximal value should be tested here for each position.
   uint32_t max_bitstring_ints[] = {
diff --git a/libartbase/base/bit_struct.h b/libartbase/base/bit_struct.h
index 9814fd4..292eca0 100644
--- a/libartbase/base/bit_struct.h
+++ b/libartbase/base/bit_struct.h
@@ -274,13 +274,13 @@
 // If a standard-layout union contains several standard-layout structs that share a common
 // initial sequence ... it is permitted to inspect the common initial sequence of any of
 // standard-layout struct members.
-#define BITSTRUCT_DEFINE_START(name, bitwidth)                                 \
-    union name {                                                               \
-      art::detail::DefineBitStructSize<(bitwidth)> _;                          \
-      static constexpr size_t BitStructSizeOf() { return (bitwidth); }         \
-      name& operator=(const name& other) { _ = other._; return *this; }        \
-      name(const name& other) : _(other._) {}                                  \
-      name() = default;                                                        \
+#define BITSTRUCT_DEFINE_START(name, bitwidth)                                        \
+    union name {                                                         /* NOLINT */ \
+      art::detail::DefineBitStructSize<(bitwidth)> _;                                 \
+      static constexpr size_t BitStructSizeOf() { return (bitwidth); }                \
+      name& operator=(const name& other) { _ = other._; return *this; }  /* NOLINT */ \
+      name(const name& other) : _(other._) {}                                         \
+      name() = default;                                                               \
       ~name() = default;
 
 // End the definition of a bitstruct, and insert a sanity check
diff --git a/libartbase/base/bit_struct_test.cc b/libartbase/base/bit_struct_test.cc
index 577682c..a2389eb 100644
--- a/libartbase/base/bit_struct_test.cc
+++ b/libartbase/base/bit_struct_test.cc
@@ -73,7 +73,7 @@
 TEST(BitStructs, Custom) {
   CustomBitStruct expected(0b1111);
 
-  BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f{};
+  BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f{};
 
   EXPECT_EQ(1u, sizeof(f));
 
@@ -85,9 +85,9 @@
   EXPECT_EQ(AsUint(f), 0b11110000u);
 }
 
-BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8)
-  BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a;
-  BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b;
+BITSTRUCT_DEFINE_START(TestTwoCustom, /* size= */ 8)
+  BitStructField<CustomBitStruct, /*lsb=*/0, /*width=*/4> f4_a;
+  BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f4_b;
 BITSTRUCT_DEFINE_END(TestTwoCustom);
 
 TEST(BitStructs, TwoCustom) {
@@ -122,7 +122,7 @@
 }
 
 TEST(BitStructs, Number) {
-  BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn{};
+  BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4> bsn{};
   EXPECT_EQ(2u, sizeof(bsn));
 
   bsn = 0b1111;
@@ -135,20 +135,20 @@
   EXPECT_EQ(AsUint(bsn), 0b11110000u);
 }
 
-BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8)
-  BitStructInt</*lsb*/0, /*width*/3> i3;
-  BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size= */ 8)
+  BitStructInt</*lsb=*/0, /*width=*/3> i3;
+  BitStructUint</*lsb=*/3, /*width=*/4> u4;
 
-  BitStructUint</*lsb*/0, /*width*/7> alias_all;
+  BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
 BITSTRUCT_DEFINE_END(TestBitStruct);
 
 TEST(BitStructs, Test1) {
   {
     // Check minimal size selection is correct.
-    BitStructInt</*lsb*/0, /*width*/3> i3;
-    BitStructUint</*lsb*/3, /*width*/4> u4;
+    BitStructInt</*lsb=*/0, /*width=*/3> i3;
+    BitStructUint</*lsb=*/3, /*width=*/4> u4;
 
-    BitStructUint</*lsb*/0, /*width*/7> alias_all;
+    BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
 
     EXPECT_EQ(1u, sizeof(i3));
     EXPECT_EQ(1u, sizeof(u4));
@@ -216,12 +216,12 @@
   }
 }
 
-BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32)
-  BitStructUint</*lsb*/0, /*width*/3> u3;
-  BitStructUint</*lsb*/3, /*width*/10> u10;
-  BitStructUint</*lsb*/13, /*width*/19> u19;
+BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size= */ 32)
+  BitStructUint</*lsb=*/0, /*width=*/3> u3;
+  BitStructUint</*lsb=*/3, /*width=*/10> u10;
+  BitStructUint</*lsb=*/13, /*width=*/19> u19;
 
-  BitStructUint</*lsb*/0, /*width*/32> alias_all;
+  BitStructUint</*lsb=*/0, /*width=*/32> alias_all;
 BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
 
 // static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
@@ -255,11 +255,11 @@
   EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
 }
 
-BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size */ 8)
-  BitStructInt</*lsb*/0, /*width*/3> i3;
-  BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size= */ 8)
+  BitStructInt</*lsb=*/0, /*width=*/3> i3;
+  BitStructUint</*lsb=*/3, /*width=*/4> u4;
 
-  BitStructUint</*lsb*/0, /*width*/8> alias_all;
+  BitStructUint</*lsb=*/0, /*width=*/8> alias_all;
 BITSTRUCT_DEFINE_END(TestBitStruct_u8);
 
 TEST(BitStructs, FieldAssignment) {
@@ -283,11 +283,11 @@
   }
 }
 
-BITSTRUCT_DEFINE_START(NestedStruct, /* size */ 64)
-  BitStructField<MixedSizeBitStruct, /*lsb*/0> mixed_lower;
-  BitStructField<MixedSizeBitStruct, /*lsb*/32> mixed_upper;
+BITSTRUCT_DEFINE_START(NestedStruct, /* size= */ 64)
+  BitStructField<MixedSizeBitStruct, /*lsb=*/0> mixed_lower;
+  BitStructField<MixedSizeBitStruct, /*lsb=*/32> mixed_upper;
 
-  BitStructUint</*lsb*/0, /*width*/64> alias_all;
+  BitStructUint</*lsb=*/0, /*width=*/64> alias_all;
 BITSTRUCT_DEFINE_END(NestedStruct);
 
 TEST(BitStructs, NestedFieldAssignment) {
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 54e8861..d6a1d7b 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -100,8 +100,6 @@
   BitMemoryRegion table_data_;
   size_t num_rows_ = 0;
   uint16_t column_offset_[kNumColumns + 1] = {};
-
-  DISALLOW_COPY_AND_ASSIGN(BitTableBase);
 };
 
 // Helper class which can be used to create BitTable accessors with named getters.
diff --git a/libartbase/base/bit_utils_test.cc b/libartbase/base/bit_utils_test.cc
index 3a80600..91fc3b0 100644
--- a/libartbase/base/bit_utils_test.cc
+++ b/libartbase/base/bit_utils_test.cc
@@ -353,89 +353,92 @@
 static_assert(MaskLeastSignificant<uint64_t>(63) == (std::numeric_limits<uint64_t>::max() >> 1u),
               "TestMaskLeastSignificant#6");
 
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
-static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/0) == 0xFF, "TestBitFieldClear#1");
+static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
               "TestBitFieldClear#2");
-static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
               "TestBitFieldClear#3");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5");
-static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6");
-static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/2) == 0b11111100, "TestBitFieldClear#4");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/3) == 0b11111000, "TestBitFieldClear#5");
+static_assert(BitFieldClear(0xFF, /*lsb=*/1, /*width=*/3) == 0b11110001, "TestBitFieldClear#6");
+static_assert(BitFieldClear(0xFF, /*lsb=*/2, /*width=*/3) == 0b11100011, "TestBitFieldClear#7");
 
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1");
-static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/0) == 0x0, "TestBitFieldExtract#1");
+static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32)
                   == std::numeric_limits<uint32_t>::max(),
               "TestBitFieldExtract#2");
-static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32)
                   == std::numeric_limits<int32_t>::max(),
               "TestBitFieldExtract#3");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/2) == 0b00000011,
               "TestBitFieldExtract#4");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/3) == 0b00000111,
               "TestBitFieldExtract#5");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/1, /*width=*/3) == 0b00000111,
               "TestBitFieldExtract#6");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/2, /*width=*/3) == 0b00000111,
               "TestBitFieldExtract#7");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/3, /*width=*/3) == 0b00000111,
               "TestBitFieldExtract#8");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/8, /*width=*/3) == 0b00000000,
               "TestBitFieldExtract#9");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/7, /*width=*/3) == 0b00000001,
               "TestBitFieldExtract#10");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/6, /*width=*/3) == 0b00000011,
               "TestBitFieldExtract#11");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13");
-static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14");
-static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15");
-static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16");
-static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17");
-static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18");
-static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19");
-static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/2) == -1, "TestBitFieldExtract#12");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/3) == -1, "TestBitFieldExtract#13");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/1, /*width=*/3) == -1, "TestBitFieldExtract#14");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/2, /*width=*/3) == -1, "TestBitFieldExtract#15");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/3, /*width=*/3) == -1, "TestBitFieldExtract#16");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/8, /*width=*/3) == 0b00000000,
+              "TestBitFieldExtract#17");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/7, /*width=*/3) == 0b00000001,
+              "TestBitFieldExtract#18");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/6, /*width=*/3) == 0b00000011,
+              "TestBitFieldExtract#19");
+static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
                   == 0b00001010,
               "TestBitFieldExtract#20");
-static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
                   == static_cast<int8_t>(0b11111010),
               "TestBitFieldExtract#21");
 
-static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF,
+static_assert(BitFieldInsert(0xFF, /*data=*/0x0, /*lsb=*/0, /*width=*/0) == 0xFF,
               "TestBitFieldInsert#1");
 static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(),
-                             /*data*/std::numeric_limits<uint32_t>::max(),
-                             /*lsb*/0,
-                             /*width*/32)
+                             /*data=*/std::numeric_limits<uint32_t>::max(),
+                             /*lsb=*/0,
+                             /*width=*/32)
                   == std::numeric_limits<uint32_t>::max(),
               "TestBitFieldInsert#2");
 static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(),
-                             /*data*/std::numeric_limits<uint32_t>::max(),
-                             /*lsb*/0,
-                             /*width*/32)
+                             /*data=*/std::numeric_limits<uint32_t>::max(),
+                             /*lsb=*/0,
+                             /*width=*/32)
                   == std::numeric_limits<uint32_t>::max(),
               "TestBitFieldInsert#3");
 static_assert(BitFieldInsert(0u,
-                             /*data*/std::numeric_limits<uint32_t>::max(),
-                             /*lsb*/0,
-                             /*width*/32)
+                             /*data=*/std::numeric_limits<uint32_t>::max(),
+                             /*lsb=*/0,
+                             /*width=*/32)
                   == std::numeric_limits<uint32_t>::max(),
               "TestBitFieldInsert#4");
 static_assert(BitFieldInsert(-(-0),
-                             /*data*/std::numeric_limits<uint32_t>::max(),
-                             /*lsb*/0,
-                             /*width*/32)
+                             /*data=*/std::numeric_limits<uint32_t>::max(),
+                             /*lsb=*/0,
+                             /*width=*/32)
                   == std::numeric_limits<uint32_t>::max(),
               "TestBitFieldInsert#5");
-static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldInsert(0x00, /*data=*/0b11u, /*lsb=*/0, /*width=*/2) == 0b00000011,
               "TestBitFieldInsert#6");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/0, /*width=*/3) == 0b00000111,
               "TestBitFieldInsert#7");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/1, /*width=*/3) == 0b00001110,
               "TestBitFieldInsert#8");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/2, /*width=*/3) == 0b00011100,
               "TestBitFieldInsert#9");
-static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100,
+static_assert(BitFieldInsert(0b01011100, /*data=*/0b1101u, /*lsb=*/4, /*width=*/4) == 0b11011100,
               "TestBitFieldInsert#10");
 
 template <typename Container>
diff --git a/libartbase/base/callee_save_type.h b/libartbase/base/callee_save_type.h
deleted file mode 100644
index 3e44a3a..0000000
--- a/libartbase/base/callee_save_type.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBARTBASE_BASE_CALLEE_SAVE_TYPE_H_
-#define ART_LIBARTBASE_BASE_CALLEE_SAVE_TYPE_H_
-
-#include <cstddef>
-#include <ostream>
-
-namespace art {
-
-// Returns a special method that describes all callee saves being spilled to the stack.
-enum class CalleeSaveType : uint32_t {
-  kSaveAllCalleeSaves,  // All callee-save registers.
-  kSaveRefsOnly,        // Only those callee-save registers that can hold references.
-  kSaveRefsAndArgs,     // References (see above) and arguments (usually caller-save registers).
-  kSaveEverything,      // All registers, including both callee-save and caller-save.
-  kSaveEverythingForClinit,    // Special kSaveEverything for clinit.
-  kSaveEverythingForSuspendCheck,  // Special kSaveEverything for suspend check.
-  kLastCalleeSaveType   // Value used for iteration.
-};
-std::ostream& operator<<(std::ostream& os, const CalleeSaveType& rhs);
-
-static inline constexpr CalleeSaveType GetCanonicalCalleeSaveType(CalleeSaveType type) {
-  if (type == CalleeSaveType::kSaveEverythingForClinit ||
-      type == CalleeSaveType::kSaveEverythingForSuspendCheck) {
-    return CalleeSaveType::kSaveEverything;
-  }
-  return type;
-}
-
-}  // namespace art
-
-#endif  // ART_LIBARTBASE_BASE_CALLEE_SAVE_TYPE_H_
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 6dd2381..18e1bf0 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -24,8 +24,8 @@
 #include "nativehelper/scoped_local_ref.h"
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 #include "android-base/unique_fd.h"
-#include <unicode/uvernum.h>
 
 #include "art_field-inl.h"
 #include "base/file_utils.h"
@@ -62,7 +62,7 @@
     : ScratchFile(other.GetFilename() + suffix) {}
 
 ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) {
-  int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
+  int fd = open(filename_.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
   CHECK_NE(-1, fd);
   file_.reset(new File(fd, GetFilename(), true));
 }
@@ -73,11 +73,11 @@
   file_.reset(file);
 }
 
-ScratchFile::ScratchFile(ScratchFile&& other) {
+ScratchFile::ScratchFile(ScratchFile&& other) noexcept {
   *this = std::move(other);
 }
 
-ScratchFile& ScratchFile::operator=(ScratchFile&& other) {
+ScratchFile& ScratchFile::operator=(ScratchFile&& other) noexcept {
   if (GetFile() != other.GetFile()) {
     std::swap(filename_, other.filename_);
     std::swap(file_, other.file_);
@@ -110,49 +110,59 @@
   CHECK_EQ(0, unlink_result);
 }
 
-void CommonArtTestImpl::SetUpAndroidRoot() {
+void CommonArtTestImpl::SetUpAndroidRootEnvVars() {
   if (IsHost()) {
-    // $ANDROID_ROOT is set on the device, but not necessarily on the host.
-    // But it needs to be set so that icu4c can find its locale data.
-    const char* android_root_from_env = getenv("ANDROID_ROOT");
-    if (android_root_from_env == nullptr) {
-      // Use ANDROID_HOST_OUT for ANDROID_ROOT if it is set.
-      const char* android_host_out = getenv("ANDROID_HOST_OUT");
-      if (android_host_out != nullptr) {
-        setenv("ANDROID_ROOT", android_host_out, 1);
-      } else {
-        // Build it from ANDROID_BUILD_TOP or cwd
-        std::string root;
-        const char* android_build_top = getenv("ANDROID_BUILD_TOP");
-        if (android_build_top != nullptr) {
-          root += android_build_top;
-        } else {
-          // Not set by build server, so default to current directory
-          char* cwd = getcwd(nullptr, 0);
-          setenv("ANDROID_BUILD_TOP", cwd, 1);
-          root += cwd;
-          free(cwd);
-        }
+    // Make sure that ANDROID_BUILD_TOP is set. If not, set it from CWD.
+    const char* android_build_top_from_env = getenv("ANDROID_BUILD_TOP");
+    if (android_build_top_from_env == nullptr) {
+      // Not set by build server, so default to current directory.
+      char* cwd = getcwd(nullptr, 0);
+      setenv("ANDROID_BUILD_TOP", cwd, 1);
+      free(cwd);
+      android_build_top_from_env = getenv("ANDROID_BUILD_TOP");
+    }
+
+    const char* android_host_out_from_env = getenv("ANDROID_HOST_OUT");
+    if (android_host_out_from_env == nullptr) {
+      // Not set by build server, so default to the usual value of
+      // ANDROID_HOST_OUT.
+      std::string android_host_out = android_build_top_from_env;
 #if defined(__linux__)
-        root += "/out/host/linux-x86";
+      android_host_out += "/out/host/linux-x86";
 #elif defined(__APPLE__)
-        root += "/out/host/darwin-x86";
+      android_host_out += "/out/host/darwin-x86";
 #else
 #error unsupported OS
 #endif
-        setenv("ANDROID_ROOT", root.c_str(), 1);
-      }
+      setenv("ANDROID_HOST_OUT", android_host_out.c_str(), 1);
+      android_host_out_from_env = getenv("ANDROID_HOST_OUT");
     }
-    setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
 
-    // Not set by build server, so default
-    if (getenv("ANDROID_HOST_OUT") == nullptr) {
-      setenv("ANDROID_HOST_OUT", getenv("ANDROID_ROOT"), 1);
+    // Environment variable ANDROID_ROOT is set on the device, but not
+    // necessarily on the host.
+    const char* android_root_from_env = getenv("ANDROID_ROOT");
+    if (android_root_from_env == nullptr) {
+      // Use ANDROID_HOST_OUT for ANDROID_ROOT.
+      setenv("ANDROID_ROOT", android_host_out_from_env, 1);
+      android_root_from_env = getenv("ANDROID_ROOT");
     }
+
+    // Environment variable ANDROID_RUNTIME_ROOT is set on the device, but not
+    // necessarily on the host. It needs to be set so that various libraries
+    // like icu4c can find their data files.
+    const char* android_runtime_root_from_env = getenv("ANDROID_RUNTIME_ROOT");
+    if (android_runtime_root_from_env == nullptr) {
+      // Use ${ANDROID_HOST_OUT}/com.android.runtime for ANDROID_RUNTIME_ROOT.
+      std::string android_runtime_root = android_host_out_from_env;
+      android_runtime_root += "/com.android.runtime";
+      setenv("ANDROID_RUNTIME_ROOT", android_runtime_root.c_str(), 1);
+    }
+
+    setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
   }
 }
 
-void CommonArtTestImpl::SetUpAndroidData(std::string& android_data) {
+void CommonArtTestImpl::SetUpAndroidDataDir(std::string& android_data) {
   // On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
   if (IsHost()) {
     const char* tmpdir = getenv("TMPDIR");
@@ -172,15 +182,16 @@
 }
 
 void CommonArtTestImpl::SetUp() {
-  SetUpAndroidRoot();
-  SetUpAndroidData(android_data_);
+  SetUpAndroidRootEnvVars();
+  SetUpAndroidDataDir(android_data_);
   dalvik_cache_.append(android_data_.c_str());
   dalvik_cache_.append("/dalvik-cache");
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
 }
 
-void CommonArtTestImpl::TearDownAndroidData(const std::string& android_data, bool fail_on_error) {
+void CommonArtTestImpl::TearDownAndroidDataDir(const std::string& android_data,
+                                               bool fail_on_error) {
   if (fail_on_error) {
     ASSERT_EQ(rmdir(android_data.c_str()), 0);
   } else {
@@ -251,7 +262,7 @@
   static constexpr bool kVerifyChecksum = true;
   const ArtDexFileLoader dex_file_loader;
   if (!dex_file_loader.Open(
-        location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
+        location, location, /* verify= */ true, kVerifyChecksum, &error_msg, &dex_files)) {
     LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
     UNREACHABLE();
   } else {
@@ -295,7 +306,7 @@
   ClearDirectory(dalvik_cache_.c_str());
   int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
   ASSERT_EQ(0, rmdir_cache_result);
-  TearDownAndroidData(android_data_, true);
+  TearDownAndroidDataDir(android_data_, true);
   dalvik_cache_.clear();
 }
 
@@ -317,9 +328,51 @@
 }
 
 std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
-  return std::vector<std::string>({GetDexFileName("core-oj", IsHost()),
-                                   GetDexFileName("core-libart", IsHost()),
-                                   GetDexFileName("core-simple", IsHost())});
+  // Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+  // because that's what we use for compiling the core.art image.
+  // It may contain additional modules from TEST_CORE_JARS.
+  static const char* const kLibcoreModules[] = {
+      // CORE_IMG_JARS modules.
+      "core-oj",
+      "core-libart",
+      "okhttp",
+      "bouncycastle",
+      "apache-xml",
+      // Additional modules.
+      "conscrypt",
+  };
+
+  std::vector<std::string> result;
+  result.reserve(arraysize(kLibcoreModules));
+  for (const char* module : kLibcoreModules) {
+    result.push_back(GetDexFileName(module, IsHost()));
+  }
+  return result;
+}
+
+std::vector<std::string> CommonArtTestImpl::GetLibCoreDexLocations() {
+  std::vector<std::string> result = GetLibCoreDexFileNames();
+  if (IsHost()) {
+    // Strip the ANDROID_BUILD_TOP directory including the directory separator '/'.
+    const char* host_dir = getenv("ANDROID_BUILD_TOP");
+    CHECK(host_dir != nullptr);
+    std::string prefix = host_dir;
+    CHECK(!prefix.empty());
+    if (prefix.back() != '/') {
+      prefix += '/';
+    }
+    for (std::string& location : result) {
+      CHECK_GT(location.size(), prefix.size());
+      CHECK_EQ(location.compare(0u, prefix.size(), prefix), 0);
+      location.erase(0u, prefix.size());
+    }
+  }
+  return result;
+}
+
+std::string CommonArtTestImpl::GetClassPathOption(const char* option,
+                                                  const std::vector<std::string>& class_path) {
+  return option + android::base::Join(class_path, ':');
 }
 
 std::string CommonArtTestImpl::GetTestAndroidRoot() {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index d645fa1..3e2340f 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -54,9 +54,9 @@
 
   ScratchFile(const ScratchFile& other, const char* suffix);
 
-  ScratchFile(ScratchFile&& other);
+  ScratchFile(ScratchFile&& other) noexcept;
 
-  ScratchFile& operator=(ScratchFile&& other);
+  ScratchFile& operator=(ScratchFile&& other) noexcept;
 
   explicit ScratchFile(File* file);
 
@@ -85,17 +85,26 @@
   CommonArtTestImpl() = default;
   virtual ~CommonArtTestImpl() = default;
 
-  static void SetUpAndroidRoot();
+  // Set up ANDROID_BUILD_TOP, ANDROID_HOST_OUT, ANDROID_ROOT and ANDROID_RUNTIME_ROOT
+  // environment variables using sensible defaults if not already set.
+  static void SetUpAndroidRootEnvVars();
 
+  // Set up the ANDROID_DATA environment variable, creating the directory if required.
   // Note: setting up ANDROID_DATA may create a temporary directory. If this is used in a
   // non-derived class, be sure to also call the corresponding tear-down below.
-  static void SetUpAndroidData(std::string& android_data);
+  static void SetUpAndroidDataDir(std::string& android_data);
 
-  static void TearDownAndroidData(const std::string& android_data, bool fail_on_error);
+  static void TearDownAndroidDataDir(const std::string& android_data, bool fail_on_error);
 
   // Gets the paths of the libcore dex files.
   static std::vector<std::string> GetLibCoreDexFileNames();
 
+  // Gets the locations of the libcore dex files.
+  static std::vector<std::string> GetLibCoreDexLocations();
+
+  static std::string GetClassPathOption(const char* option,
+                                        const std::vector<std::string>& class_path);
+
   // Returns bin directory which contains host's prebuild tools.
   static std::string GetAndroidHostToolsDir();
 
diff --git a/libartbase/base/enums.cc b/libartbase/base/enums.cc
new file mode 100644
index 0000000..3f28232
--- /dev/null
+++ b/libartbase/base/enums.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "enums.h"
+
+#include <ostream>
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& os, const PointerSize& rhs) {
+  switch (rhs) {
+    case PointerSize::k32: os << "k32"; break;
+    case PointerSize::k64: os << "k64"; break;
+    default: os << "PointerSize[" << static_cast<int>(rhs) << "]"; break;
+  }
+  return os;
+}
+
+}  // namespace art
diff --git a/libartbase/base/enums.h b/libartbase/base/enums.h
index ad5578f..c5fb880 100644
--- a/libartbase/base/enums.h
+++ b/libartbase/base/enums.h
@@ -18,7 +18,7 @@
 #define ART_LIBARTBASE_BASE_ENUMS_H_
 
 #include <cstddef>
-#include <ostream>
+#include <iosfwd>
 
 namespace art {
 
@@ -27,14 +27,7 @@
   k64 = 8
 };
 
-inline std::ostream& operator<<(std::ostream& os, const PointerSize& rhs) {
-  switch (rhs) {
-    case PointerSize::k32: os << "k32"; break;
-    case PointerSize::k64: os << "k64"; break;
-    default: os << "PointerSize[" << static_cast<int>(rhs) << "]"; break;
-  }
-  return os;
-}
+std::ostream& operator<<(std::ostream& os, const PointerSize& rhs);
 
 static constexpr PointerSize kRuntimePointerSize = sizeof(void*) == 8U
                                                        ? PointerSize::k64
diff --git a/libartbase/base/file_magic.cc b/libartbase/base/file_magic.cc
index d8d843b..1471c59 100644
--- a/libartbase/base/file_magic.cc
+++ b/libartbase/base/file_magic.cc
@@ -31,7 +31,7 @@
 
 File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
   CHECK(magic != nullptr);
-  File fd(filename, O_RDONLY, /* check_usage */ false);
+  File fd(filename, O_RDONLY, /* check_usage= */ false);
   if (fd.Fd() == -1) {
     *error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
     return File();
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index 1d106b2..4953bab 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -19,11 +19,13 @@
 #include <inttypes.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#ifndef _WIN32
 #include <sys/wait.h>
+#endif
 #include <unistd.h>
 
 // We need dladdr.
-#ifndef __APPLE__
+#if !defined(__APPLE__) && !defined(_WIN32)
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #define DEFINED_GNU_SOURCE
@@ -84,6 +86,10 @@
 }
 
 std::string GetAndroidRootSafe(std::string* error_msg) {
+#ifdef _WIN32
+  *error_msg = "GetAndroidRootSafe unsupported for Windows.";
+  return "";
+#else
   // Prefer ANDROID_ROOT if it's set.
   const char* android_dir = getenv("ANDROID_ROOT");
   if (android_dir != nullptr) {
@@ -118,6 +124,7 @@
     return "";
   }
   return "/system";
+#endif
 }
 
 std::string GetAndroidRoot() {
@@ -157,7 +164,7 @@
     return dir;
   } else {
     LOG(FATAL) << error_msg;
-    return nullptr;
+    UNREACHABLE();
   }
 }
 
@@ -179,6 +186,15 @@
 
 void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
                     bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
+#ifdef _WIN32
+  UNUSED(subdir);
+  UNUSED(create_if_absent);
+  UNUSED(dalvik_cache);
+  UNUSED(have_android_data);
+  UNUSED(dalvik_cache_exists);
+  UNUSED(is_global_cache);
+  LOG(FATAL) << "GetDalvikCache unsupported on Windows.";
+#else
   CHECK(subdir != nullptr);
   std::string error_msg;
   const char* android_data = GetAndroidDataSafe(&error_msg);
@@ -199,6 +215,7 @@
     *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
                             (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
   }
+#endif
 }
 
 std::string GetDalvikCache(const char* subdir) {
@@ -261,10 +278,27 @@
   }
 }
 
+bool LocationIsOnRuntimeModule(const char* full_path) {
+  std::string error_msg;
+  const char* runtime_path = GetAndroidDirSafe("ANDROID_RUNTIME_ROOT",
+                                               "/apex/com.android.runtime",
+                                               &error_msg);
+  if (runtime_path == nullptr) {
+    return false;
+  }
+  return android::base::StartsWith(full_path, runtime_path);
+}
+
 bool LocationIsOnSystem(const char* path) {
+#ifdef _WIN32
+  UNUSED(path);
+  LOG(FATAL) << "LocationIsOnSystem is unsupported on Windows.";
+  return false;
+#else
   UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
   return full_path != nullptr &&
       android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
+#endif
 }
 
 bool LocationIsOnSystemFramework(const char* full_path) {
diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h
index c249bcc..bddfaa1 100644
--- a/libartbase/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -72,6 +72,9 @@
 //          ReplaceFileExtension("foo", "abc") == "foo.abc"
 std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
 
+// Return whether the location is on the runtime module (/apex/com.android.runtime).
+bool LocationIsOnRuntimeModule(const char* location);
+
 // Return whether the location is on system (i.e. android root).
 bool LocationIsOnSystem(const char* location);
 
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 2a7273b..c917307 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -71,26 +71,30 @@
   // Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
   UniqueCPtr<char> root_dup(strdup(android_root_env.c_str()));
   char* dir = dirname(root_dup.get());
-  ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
+  ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, /* overwrite */ 1));
   std::string android_root2 = GetAndroidRootSafe(&error_msg);
   EXPECT_STREQ(dir, android_root2.c_str());
 
   // Set a bogus value for ANDROID_ROOT. This should be an error.
-  ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
+  ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
   EXPECT_EQ(GetAndroidRootSafe(&error_msg), "");
 
   // Unset ANDROID_ROOT and see that it still returns something (as libart code is running).
   ASSERT_EQ(0, unsetenv("ANDROID_ROOT"));
   std::string android_root3 = GetAndroidRootSafe(&error_msg);
   // This should be the same as the other root (modulo realpath), otherwise the test setup is
-  // broken.
-  UniqueCPtr<char> real_root(realpath(android_root.c_str(), nullptr));
+  // broken, on non-bionic. On bionic we may be running with a different libart that lives
+  // outside of ANDROID_ROOT.
   UniqueCPtr<char> real_root3(realpath(android_root3.c_str(), nullptr));
+#if !defined(__BIONIC__ ) || defined(__ANDROID__)
+  UniqueCPtr<char> real_root(realpath(android_root.c_str(), nullptr));
   EXPECT_STREQ(real_root.get(), real_root3.get());
-
+#else
+  EXPECT_STRNE(real_root3.get(), "");
+#endif
 
   // Reset ANDROID_ROOT, as other things may depend on it.
-  ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), 1 /* overwrite */));
+  ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), /* overwrite */ 1));
 }
 
 TEST_F(FileUtilsTest, ReplaceFileExtension) {
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index bc79ff2..97eae63 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -38,20 +38,6 @@
 // compile-time constant so the compiler can generate better code.
 static constexpr int kPageSize = 4096;
 
-// Size of Dex virtual registers.
-static constexpr size_t kVRegSize = 4;
-
-// Returns whether the given memory offset can be used for generating
-// an implicit null check.
-static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
-  return offset < kPageSize;
-}
-
-// Required object alignment
-static constexpr size_t kObjectAlignmentShift = 3;
-static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;
-static constexpr size_t kLargeObjectAlignment = kPageSize;
-
 // Clion, clang analyzer, etc can falsely believe that "if (kIsDebugBuild)" always
 // returns the same value. By wrapping into a call to another constexpr function, we force it
 // to realize that is not actually always evaluating to the same value.
@@ -117,45 +103,6 @@
 static constexpr bool kHostStaticBuildEnabled = false;
 #endif
 
-// Garbage collector constants.
-static constexpr bool kMovingCollector = true;
-static constexpr bool kMarkCompactSupport = false && kMovingCollector;
-// True if we allow moving classes.
-static constexpr bool kMovingClasses = !kMarkCompactSupport;
-// If true, enable generational collection when using the Concurrent Copying
-// collector, i.e. use sticky-bit CC for minor collections and (full) CC for
-// major collections.
-#ifdef ART_USE_GENERATIONAL_CC
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true;
-#else
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false;
-#endif
-
-// If true, enable the tlab allocator by default.
-#ifdef ART_USE_TLAB
-static constexpr bool kUseTlab = true;
-#else
-static constexpr bool kUseTlab = false;
-#endif
-
-// Kinds of tracing clocks.
-enum class TraceClockSource {
-  kThreadCpu,
-  kWall,
-  kDual,  // Both wall and thread CPU clocks.
-};
-
-#if defined(__linux__)
-static constexpr TraceClockSource kDefaultTraceClockSource = TraceClockSource::kDual;
-#else
-static constexpr TraceClockSource kDefaultTraceClockSource = TraceClockSource::kWall;
-#endif
-
-static constexpr bool kDefaultMustRelocate = true;
-
-// Size of a heap reference.
-static constexpr size_t kHeapReferenceSize = sizeof(uint32_t);
-
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_GLOBALS_H_
diff --git a/libartbase/base/hiddenapi_domain.h b/libartbase/base/hiddenapi_domain.h
new file mode 100644
index 0000000..4cbc22d
--- /dev/null
+++ b/libartbase/base/hiddenapi_domain.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_HIDDENAPI_DOMAIN_H_
+#define ART_LIBARTBASE_BASE_HIDDENAPI_DOMAIN_H_
+
+namespace art {
+namespace hiddenapi {
+
+// List of domains supported by the hidden API access checks. A domain with a lower
+// ordinal is considered more "trusted", i.e. always allowed to access members of
+// domains with a greater ordinal. Access checks are performed when code tries to
+// access a method/field from a more trusted domain than itself.
+enum class Domain {
+  kCorePlatform = 0,
+  kPlatform,
+  kApplication,
+};
+
+inline bool IsDomainMoreTrustedThan(Domain domainA, Domain domainB) {
+  return static_cast<uint32_t>(domainA) <= static_cast<uint32_t>(domainB);
+}
+
+}  // namespace hiddenapi
+}  // namespace art
+
+
+#endif  // ART_LIBARTBASE_BASE_HIDDENAPI_DOMAIN_H_
diff --git a/libartbase/base/hiddenapi_flags.cc b/libartbase/base/hiddenapi_flags.cc
new file mode 100644
index 0000000..ea57cb7
--- /dev/null
+++ b/libartbase/base/hiddenapi_flags.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hiddenapi_flags.h"
+
+namespace art {
+namespace hiddenapi {
+
+constexpr const char* ApiList::kValueNames[ApiList::kValueCount];
+constexpr const char* ApiList::kDomainApiNames[ApiList::kDomainApiCount];
+constexpr SdkVersion ApiList::kMaxSdkVersions[ApiList::kValueCount];
+
+}  // namespace hiddenapi
+}  // namespace art
diff --git a/libartbase/base/hiddenapi_flags.h b/libartbase/base/hiddenapi_flags.h
new file mode 100644
index 0000000..c468723
--- /dev/null
+++ b/libartbase/base/hiddenapi_flags.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
+#define ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
+
+#include "sdk_version.h"
+
+#include <vector>
+
+#include "android-base/logging.h"
+#include "base/bit_utils.h"
+#include "base/dumpable.h"
+#include "base/macros.h"
+
+namespace art {
+namespace hiddenapi {
+
+// Helper methods used inside ApiList. These were moved outside of the ApiList
+// class so that they can be used in static_asserts. If they were inside, they
+// would be part of an unfinished type.
+namespace helper {
+  // Casts enum value to uint32_t.
+  template<typename T>
+  constexpr uint32_t ToUint(T val) { return static_cast<uint32_t>(val); }
+
+  // Returns uint32_t with one bit set at an index given by an enum value.
+  template<typename T>
+  constexpr uint32_t ToBit(T val) { return 1u << ToUint(val); }
+
+  // Returns a bit mask with `size` least significant bits set.
+  constexpr uint32_t BitMask(uint32_t size) { return (1u << size) - 1; }
+
+  // Returns a bit mask formed from an enum defining kMin and kMax. The values
+  // are assumed to be indices of min/max bits and the resulting bitmask has
+  // bits [kMin, kMax] set.
+  template<typename T>
+  constexpr uint32_t BitMask() {
+    return BitMask(ToUint(T::kMax) + 1) & (~BitMask(ToUint(T::kMin)));
+  }
+
+  // Returns true if `val` is a bitwise subset of `mask`.
+  constexpr bool MatchesBitMask(uint32_t val, uint32_t mask) { return (val & mask) == val; }
+
+  // Returns true if the uint32_t value of `val` is a bitwise subset of `mask`.
+  template<typename T>
+  constexpr bool MatchesBitMask(T val, uint32_t mask) { return MatchesBitMask(ToUint(val), mask); }
+
+  // Returns the number of values defined in an enum, assuming the enum defines
+  // kMin and kMax and no integer values are skipped between them.
+  template<typename T>
+  constexpr uint32_t NumValues() { return ToUint(T::kMax) - ToUint(T::kMin) + 1; }
+}  // namespace helper
+
+/*
+ * This class represents the information whether a field/method is in
+ * public API (whitelist) or if it isn't, apps targeting which SDK
+ * versions are allowed to access it.
+ */
+class ApiList {
+ private:
+  // Number of bits reserved for Value in dex flags, and the corresponding bit mask.
+  static constexpr uint32_t kValueBitSize = 3;
+  static constexpr uint32_t kValueBitMask = helper::BitMask(kValueBitSize);
+
+  enum class Value : uint32_t {
+    // Values independent of target SDK version of app
+    kWhitelist =    0,
+    kGreylist =     1,
+    kBlacklist =    2,
+
+    // Values dependent on target SDK version of app. Put these last as
+    // their list will be extended in future releases.
+    // The max release code implicitly includes all maintenance releases,
+    // e.g. GreylistMaxO is accessible to targetSdkVersion <= 27 (O_MR1).
+    kGreylistMaxO = 3,
+    kGreylistMaxP = 4,
+
+    // Special values
+    kInvalid =      (static_cast<uint32_t>(-1) & kValueBitMask),
+    kMin =          kWhitelist,
+    kMax =          kGreylistMaxP,
+  };
+
+  // Additional bit flags after the first kValueBitSize bits in dex flags.
+  // These are used for domain-specific API.
+  enum class DomainApi : uint32_t {
+    kCorePlatformApi = kValueBitSize,
+
+    // Special values
+    kMin =             kCorePlatformApi,
+    kMax =             kCorePlatformApi,
+  };
+
+  // Bit mask of all domain API flags.
+  static constexpr uint32_t kDomainApiBitMask = helper::BitMask<DomainApi>();
+
+  // Check that Values fit in the designated number of bits.
+  static_assert(kValueBitSize >= MinimumBitsToStore(helper::ToUint(Value::kMax)),
+                "Not enough bits to store all ApiList values");
+
+  // Sanity check that all Values are covered by kValueBitMask.
+  static_assert(helper::MatchesBitMask(Value::kMin, kValueBitMask));
+  static_assert(helper::MatchesBitMask(Value::kMax, kValueBitMask));
+
+  // Assert that Value::kInvalid is larger than the maximum Value.
+  static_assert(helper::ToUint(Value::kMax) < helper::ToUint(Value::kInvalid));
+
+  // Names corresponding to Values.
+  static constexpr const char* kValueNames[] = {
+    "whitelist",
+    "greylist",
+    "blacklist",
+    "greylist-max-o",
+    "greylist-max-p",
+  };
+
+  // Names corresponding to DomainApis.
+  static constexpr const char* kDomainApiNames[] {
+    "core-platform-api",
+  };
+
+  // Maximum SDK versions allowed to access ApiList of given Value.
+  static constexpr SdkVersion kMaxSdkVersions[] {
+    /* whitelist */ SdkVersion::kMax,
+    /* greylist */ SdkVersion::kMax,
+    /* blacklist */ SdkVersion::kMin,
+    /* greylist-max-o */ SdkVersion::kO_MR1,
+    /* greylist-max-p */ SdkVersion::kP,
+  };
+
+  explicit ApiList(Value val, uint32_t domain_apis = 0u)
+      : dex_flags_(helper::ToUint(val) | domain_apis) {
+    DCHECK(GetValue() == val);
+    DCHECK_EQ(GetDomainApis(), domain_apis);
+  }
+
+  explicit ApiList(DomainApi val) : ApiList(Value::kInvalid, helper::ToBit(val)) {}
+
+  Value GetValue() const {
+    uint32_t value = (dex_flags_ & kValueBitMask);
+
+    // Treat all ones as an invalid value.
+    if (value == helper::ToUint(Value::kInvalid)) {
+      return Value::kInvalid;
+    } else {
+      DCHECK_GE(value, helper::ToUint(Value::kMin));
+      DCHECK_LE(value, helper::ToUint(Value::kMax));
+      return static_cast<Value>(value);
+    }
+  }
+
+  uint32_t GetDomainApis() const { return (dex_flags_ & kDomainApiBitMask); }
+
+  uint32_t dex_flags_;
+
+ public:
+  ApiList() : ApiList(Value::kInvalid) {}
+
+  explicit ApiList(uint32_t dex_flags) : dex_flags_(dex_flags) {
+    DCHECK_EQ(dex_flags_, (dex_flags_ & kValueBitMask) | (dex_flags_ & kDomainApiBitMask));
+  }
+
+  // Helpers for conveniently constructing ApiList instances.
+  static ApiList Whitelist() { return ApiList(Value::kWhitelist); }
+  static ApiList Greylist() { return ApiList(Value::kGreylist); }
+  static ApiList Blacklist() { return ApiList(Value::kBlacklist); }
+  static ApiList GreylistMaxO() { return ApiList(Value::kGreylistMaxO); }
+  static ApiList GreylistMaxP() { return ApiList(Value::kGreylistMaxP); }
+  static ApiList CorePlatformApi() { return ApiList(DomainApi::kCorePlatformApi); }
+
+  uint32_t GetDexFlags() const { return dex_flags_; }
+  uint32_t GetIntValue() const { return helper::ToUint(GetValue()) - helper::ToUint(Value::kMin); }
+
+  // Returns the ApiList with a flag of a given name, or an empty ApiList if not matched.
+  static ApiList FromName(const std::string& str) {
+    for (uint32_t i = 0; i < kValueCount; ++i) {
+      if (str == kValueNames[i]) {
+        return ApiList(static_cast<Value>(i + helper::ToUint(Value::kMin)));
+      }
+    }
+    for (uint32_t i = 0; i < kDomainApiCount; ++i) {
+      if (str == kDomainApiNames[i]) {
+        return ApiList(static_cast<DomainApi>(i + helper::ToUint(DomainApi::kMin)));
+      }
+    }
+    return ApiList();
+  }
+
+  // Parses a vector of flag names into a single ApiList value. If successful,
+  // returns true and assigns the new ApiList to `out_api_list`.
+  static bool FromNames(std::vector<std::string>::iterator begin,
+                        std::vector<std::string>::iterator end,
+                        /* out */ ApiList* out_api_list) {
+    ApiList api_list;
+    for (std::vector<std::string>::iterator it = begin; it != end; it++) {
+      ApiList current = FromName(*it);
+      if (current.IsEmpty() || !api_list.CanCombineWith(current)) {
+        return false;
+      }
+      api_list |= current;
+    }
+    if (out_api_list != nullptr) {
+      *out_api_list = api_list;
+    }
+    return true;
+  }
+
+  bool operator==(const ApiList& other) const { return dex_flags_ == other.dex_flags_; }
+  bool operator!=(const ApiList& other) const { return !(*this == other); }
+
+  // Returns true if combining this ApiList with `other` will succeed.
+  bool CanCombineWith(const ApiList& other) const {
+    const Value val1 = GetValue();
+    const Value val2 = other.GetValue();
+    return (val1 == val2) || (val1 == Value::kInvalid) || (val2 == Value::kInvalid);
+  }
+
+  // Combine two ApiList instances.
+  ApiList operator|(const ApiList& other) {
+    // DomainApis are not mutually exclusive. Simply OR them.
+    const uint32_t domain_apis = GetDomainApis() | other.GetDomainApis();
+
+    // Values are mutually exclusive. Check if `this` and `other` have the same Value
+    // or if at most one is set.
+    const Value val1 = GetValue();
+    const Value val2 = other.GetValue();
+    if (val1 == val2) {
+      return ApiList(val1, domain_apis);
+    } else if (val1 == Value::kInvalid) {
+      return ApiList(val2, domain_apis);
+    } else if (val2 == Value::kInvalid) {
+      return ApiList(val1, domain_apis);
+    } else {
+      LOG(FATAL) << "Invalid combination of values " << Dumpable(ApiList(val1))
+          << " and " << Dumpable(ApiList(val2));
+      UNREACHABLE();
+    }
+  }
+
+  const ApiList& operator|=(const ApiList& other) {
+    (*this) = (*this) | other;
+    return *this;
+  }
+
+  // Returns true if all flags set in `other` are also set in `this`.
+  bool Contains(const ApiList& other) const {
+    return ((other.GetValue() == Value::kInvalid) || (GetValue() == other.GetValue())) &&
+           helper::MatchesBitMask(other.GetDomainApis(), GetDomainApis());
+  }
+
+  // Returns whether the configuration is valid for runtime use.
+  bool IsValid() const { return GetValue() != Value::kInvalid; }
+
+  // Returns true when no ApiList is specified and no domain_api flags either.
+  bool IsEmpty() const { return (GetValue() == Value::kInvalid) && (GetDomainApis() == 0); }
+
+  // Returns the maximum target SDK version allowed to access this ApiList.
+  SdkVersion GetMaxAllowedSdkVersion() const { return kMaxSdkVersions[GetIntValue()]; }
+
+  void Dump(std::ostream& os) const {
+    bool is_first = true;
+
+    if (GetValue() != Value::kInvalid) {
+      os << kValueNames[GetIntValue()];
+      is_first = false;
+    }
+
+    const uint32_t domain_apis = GetDomainApis();
+    for (uint32_t i = helper::ToUint(DomainApi::kMin); i <= helper::ToUint(DomainApi::kMax); i++) {
+      if (helper::MatchesBitMask(helper::ToBit(i), domain_apis)) {
+        if (is_first) {
+          is_first = false;
+        } else {
+          os << ",";
+        }
+        os << kDomainApiNames[i];
+      }
+    }
+
+    DCHECK_EQ(IsEmpty(), is_first);
+  }
+
+  static constexpr uint32_t kValueCount = helper::NumValues<Value>();
+  static constexpr uint32_t kDomainApiCount = helper::NumValues<DomainApi>();
+};
+
+inline std::ostream& operator<<(std::ostream& os, ApiList value) {
+  value.Dump(os);
+  return os;
+}
+
+}  // namespace hiddenapi
+}  // namespace art
+
+
+#endif  // ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
diff --git a/libartbase/base/logging.h b/libartbase/base/logging.h
index d2c0a02..484db87 100644
--- a/libartbase/base/logging.h
+++ b/libartbase/base/logging.h
@@ -17,9 +17,6 @@
 #ifndef ART_LIBARTBASE_BASE_LOGGING_H_
 #define ART_LIBARTBASE_BASE_LOGGING_H_
 
-#include <ostream>
-#include <sstream>
-
 #include "android-base/logging.h"
 #include "macros.h"
 
@@ -77,12 +74,12 @@
 // performed.
 extern const char* GetCmdLine();
 
-// The command used to start the ART runtime, such as "/system/bin/dalvikvm". If InitLogging hasn't
-// been performed then just returns "art"
+// The command used to start the ART runtime, such as "/apex/com.android.runtime/bin/dalvikvm". If
+// InitLogging hasn't been performed then just returns "art".
 extern const char* ProgramInvocationName();
 
 // A short version of the command used to start the ART runtime, such as "dalvikvm". If InitLogging
-// hasn't been performed then just returns "art"
+// hasn't been performed then just returns "art".
 extern const char* ProgramInvocationShortName();
 
 class LogHelper {
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 33866bb..323fa4e 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -42,12 +42,21 @@
   private: \
     void* operator new(size_t) = delete  // NOLINT
 
-#define OFFSETOF_MEMBER(t, f) \
-  (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))  // NOLINT
+// offsetof is not defined by the spec on types with non-standard layout,
+// however it is implemented by compilers in practice.
+// (note that reinterpret_cast is not valid constexpr)
+//
+// Alternative approach would be something like:
+// #define OFFSETOF_HELPER(t, f) \
+//   (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))
+// #define OFFSETOF_MEMBER(t, f) \
+//   (__builtin_constant_p(OFFSETOF_HELPER(t,f)) ? OFFSETOF_HELPER(t,f) : OFFSETOF_HELPER(t,f))
+#define OFFSETOF_MEMBER(t, f) offsetof(t, f)
 
 #define OFFSETOF_MEMBERPTR(t, f) \
   (reinterpret_cast<uintptr_t>(&(reinterpret_cast<t*>(16)->*f)) - static_cast<uintptr_t>(16))  // NOLINT
 
+#define ALIGNED(x) __attribute__ ((__aligned__(x)))
 #define PACKED(x) __attribute__ ((__aligned__(x), __packed__))
 
 // Stringify the argument.
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 02e29f1..4de34b5 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -16,7 +16,6 @@
 
 #include "malloc_arena_pool.h"
 
-#include <sys/mman.h>
 
 #include <algorithm>
 #include <cstddef>
@@ -25,6 +24,7 @@
 
 #include <android-base/logging.h>
 #include "arena_allocator-inl.h"
+#include "mman.h"
 
 namespace art {
 
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 1bf553d..2833750 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -18,8 +18,7 @@
 
 #include <inttypes.h>
 #include <stdlib.h>
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
-#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
 #include <sys/resource.h>
 #endif
 
@@ -39,6 +38,7 @@
 #include "globals.h"
 #include "logging.h"  // For VLOG_IS_ON.
 #include "memory_tool.h"
+#include "mman.h"  // For the PROT_* and MAP_* constants.
 #include "utils.h"
 
 #ifndef MAP_ANONYMOUS
@@ -394,7 +394,7 @@
     return Invalid();
   }
   const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
-  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
 }
 
 template<typename A, typename B>
@@ -585,7 +585,7 @@
                 redzone_size);
 }
 
-MemMap::MemMap(MemMap&& other)
+MemMap::MemMap(MemMap&& other) noexcept
     : MemMap() {
   swap(other);
 }
@@ -692,6 +692,24 @@
                           int tail_prot,
                           std::string* error_msg,
                           bool use_debug_name) {
+  return RemapAtEnd(new_end,
+                    tail_name,
+                    tail_prot,
+                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
+                    /* fd= */ -1,
+                    /* offset= */ 0,
+                    error_msg,
+                    use_debug_name);
+}
+
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+                          const char* tail_name,
+                          int tail_prot,
+                          int flags,
+                          int fd,
+                          off_t offset,
+                          std::string* error_msg,
+                          bool use_debug_name) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -715,9 +733,6 @@
   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
-  unique_fd fd;
-  int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
-
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
   // removes old mappings for the overlapping region. This makes the operation atomic
@@ -726,13 +741,13 @@
                                                           tail_base_size,
                                                           tail_prot,
                                                           flags,
-                                                          fd.get(),
-                                                          0));
+                                                          fd,
+                                                          offset));
   if (actual == MAP_FAILED) {
     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
-                              fd.get());
+                              fd);
     return Invalid();
   }
   // Update *this.
@@ -756,7 +771,7 @@
   uint8_t* begin = Begin();
   ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
   size_t base_size = RoundUp(byte_count, kPageSize);
-  return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+  return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse= */ false);
 }
 
 void MemMap::ReleaseReservedMemory(size_t byte_count) {
@@ -796,19 +811,30 @@
     if (!kMadviseZeroes) {
       memset(base_begin_, 0, base_size_);
     }
+#ifdef _WIN32
+    // It is benign not to madvise away the pages here.
+    PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
+#else
     int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
     if (result == -1) {
       PLOG(WARNING) << "madvise failed";
     }
+#endif
   }
 }
 
 bool MemMap::Sync() {
+#ifdef _WIN32
+  // TODO: add FlushViewOfFile support.
+  PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
+  return false;
+#else
   // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
   // protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind
   // only accepts page-aligned base address, and excludes the higher-end noaccess protection
   // from the msync range. b/27552451.
   return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
+#endif
 }
 
 bool MemMap::Protect(int prot) {
@@ -817,10 +843,12 @@
     return true;
   }
 
+#ifndef _WIN32
   if (mprotect(base_begin_, base_size_, prot) == 0) {
     prot_ = prot;
     return true;
   }
+#endif
 
   PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
               << prot << ") failed";
@@ -1191,7 +1219,11 @@
     DCHECK_LE(page_begin, page_end);
     DCHECK_LE(page_end, mem_end);
     std::fill(mem_begin, page_begin, 0);
+#ifdef _WIN32
+    LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
+#else
     CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
+#endif
     std::fill(page_end, mem_end, 0);
   }
 }
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 20eda32..525e622 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -68,8 +68,8 @@
     return MemMap();
   }
 
-  MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
-  MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
+  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
     Reset();
     swap(other);
     return *this;
@@ -139,18 +139,32 @@
                              /*out*/std::string* error_msg,
                              bool use_debug_name = true);
   static MemMap MapAnonymous(const char* name,
-                             uint8_t* addr,
                              size_t byte_count,
                              int prot,
                              bool low_4gb,
                              /*out*/std::string* error_msg) {
     return MapAnonymous(name,
-                        addr,
+                        /*addr=*/ nullptr,
                         byte_count,
                         prot,
                         low_4gb,
-                        /* reuse */ false,
-                        /* reservation */ nullptr,
+                        /*reuse=*/ false,
+                        /*reservation=*/ nullptr,
+                        error_msg);
+  }
+  static MemMap MapAnonymous(const char* name,
+                             size_t byte_count,
+                             int prot,
+                             bool low_4gb,
+                             MemMap* reservation,
+                             /*out*/std::string* error_msg) {
+    return MapAnonymous(name,
+                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
+                        byte_count,
+                        prot,
+                        low_4gb,
+                        /*reuse=*/ false,
+                        reservation,
                         error_msg);
   }
 
@@ -178,10 +192,10 @@
                             flags,
                             fd,
                             start,
-                            /* low_4gb */ low_4gb,
+                            /*low_4gb=*/ low_4gb,
                             filename,
-                            /* reuse */ false,
-                            /* reservation */ nullptr,
+                            /*reuse=*/ false,
+                            /*reservation=*/ nullptr,
                             error_msg);
   }
 
@@ -261,6 +275,16 @@
                     std::string* error_msg,
                     bool use_debug_name = true);
 
+  // Unmap the pages at the end of this mapping and remap them as a new file-backed memory map.
+  MemMap RemapAtEnd(uint8_t* new_end,
+                    const char* tail_name,
+                    int tail_prot,
+                    int tail_flags,
+                    int fd,
+                    off_t offset,
+                    std::string* error_msg,
+                    bool use_debug_name = true);
+
   // Take ownership of pages at the beginning of the mapping. The mapping must be an
   // anonymous reservation mapping, owning entire pages. The `byte_count` must not
   // exceed the size of this reservation.
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
index d1c92ce..6b0e06c 100644
--- a/libartbase/base/mem_map_fuchsia.cc
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -15,8 +15,8 @@
  */
 
 #include "mem_map.h"
-#include <sys/mman.h>
 #include "logging.h"
+#include "mman.h"
 
 #include <zircon/process.h>
 #include <zircon/syscalls.h>
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index ab3d18f..bf39fd1 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -16,14 +16,14 @@
 
 #include "mem_map.h"
 
-#include <sys/mman.h>
-
 #include <memory>
 #include <random>
 
-#include "base/common_art_test.h"
+#include "common_art_test.h"
 #include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS
+#include "logging.h"
 #include "memory_tool.h"
+#include "mman.h"
 #include "unix_file/fd_file.h"
 
 namespace art {
@@ -53,7 +53,6 @@
     // Find a valid map address and unmap it before returning.
     std::string error_msg;
     MemMap map = MemMap::MapAnonymous("temp",
-                                      /* addr */ nullptr,
                                       size,
                                       PROT_READ,
                                       low_4gb,
@@ -68,7 +67,6 @@
     const size_t page_size = static_cast<size_t>(kPageSize);
     // Map a two-page memory region.
     MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
-                                     /* addr */ nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
@@ -165,17 +163,15 @@
 TEST_F(MemMapTest, ReplaceMapping_SameSize) {
   std::string error_msg;
   MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                     /* addr */ nullptr,
                                      kPageSize,
                                      PROT_READ,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                       /* addr */ nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
-                                       /* low_4gb */ false,
+                                       /*low_4gb=*/ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   void* source_addr = source.Begin();
@@ -200,21 +196,19 @@
 TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
   std::string error_msg;
   MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                     /* addr */ nullptr,
                                      5 * kPageSize,  // Need to make it larger
                                                      // initially so we know
                                                      // there won't be mappings
-                                                     // in the way we we move
+                                                     // in the way when we move
                                                      // source.
                                      PROT_READ,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                       /* addr */ nullptr,
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
-                                       /* low_4gb */ false,
+                                       /*low_4gb=*/ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   uint8_t* source_addr = source.Begin();
@@ -246,17 +240,15 @@
 TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
   std::string error_msg;
   MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                     /* addr */ nullptr,
                                      3 * kPageSize,
                                      PROT_READ,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                       /* addr */ nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
-                                       /* low_4gb */ false,
+                                       /*low_4gb=*/ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   uint8_t* source_addr = source.Begin();
@@ -285,11 +277,10 @@
   MemMap dest =
       MemMap::MapAnonymous(
           "MapAnonymousEmpty-atomic-replace-dest",
-          /* addr */ nullptr,
           3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
-                          // the way we we move source.
+                          // the way when we move source.
           PROT_READ | PROT_WRITE,
-          /* low_4gb */ false,
+          /*low_4gb=*/ false,
           &error_msg);
   ASSERT_TRUE(dest.IsValid());
   // Resize down to 1 page so we can remap the rest.
@@ -299,7 +290,9 @@
                                        dest.Begin() + kPageSize,
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
-                                       /* low_4gb */ false,
+                                       /*low_4gb=*/ false,
+                                       /*reuse=*/ false,
+                                       /*reservation=*/ nullptr,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -332,20 +325,18 @@
   CommonInit();
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
-                                    /* addr */ nullptr,
-                                    0,
+                                    /*byte_count=*/ 0,
                                     PROT_READ,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid()) << error_msg;
   ASSERT_FALSE(error_msg.empty());
 
   error_msg.clear();
   map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
-                             /* addr */ nullptr,
                              kPageSize,
                              PROT_READ | PROT_WRITE,
-                             /* low_4gb */ false,
+                             /*low_4gb=*/ false,
                              &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -358,7 +349,9 @@
                                     reinterpret_cast<uint8_t*>(kPageSize),
                                     0x20000,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
+                                    /*reuse=*/ false,
+                                    /*reservation=*/ nullptr,
                                     nullptr);
   ASSERT_FALSE(map.IsValid());
 }
@@ -368,20 +361,18 @@
   CommonInit();
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
-                                    /* addr */ nullptr,
-                                    0,
+                                    /*byte_count=*/ 0,
                                     PROT_READ,
-                                    /* low_4gb */ true,
+                                    /*low_4gb=*/ true,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid()) << error_msg;
   ASSERT_FALSE(error_msg.empty());
 
   error_msg.clear();
   map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
-                             /* addr */ nullptr,
                              kPageSize,
                              PROT_READ | PROT_WRITE,
-                             /* low_4gb */ true,
+                             /*low_4gb=*/ true,
                              &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -394,12 +385,12 @@
   constexpr size_t kMapSize = kPageSize;
   std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
   ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
-  MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
                                PROT_READ,
                                MAP_PRIVATE,
                                scratch_file.GetFd(),
-                               /*start*/0,
-                               /*low_4gb*/true,
+                               /*start=*/0,
+                               /*low_4gb=*/true,
                                scratch_file.GetFilename().c_str(),
                                &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
@@ -410,26 +401,33 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr) {
+  // TODO: The semantics of the MemMap::MapAnonymous() with a given address but without
+  // `reuse == true` or `reservation != nullptr` is weird. We should either drop support
+  // for it, or take it only as a hint and allow the result to be mapped elsewhere.
+  // Currently we're seeing failures with ASAN. b/118408378
+  TEST_DISABLED_FOR_MEMORY_TOOL();
+
   CommonInit();
   std::string error_msg;
   // Find a valid address.
-  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
+  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
   // Map at an address that should work, which should succeed.
   MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                      valid_address,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
+                                     /*reuse=*/ false,
+                                     /*reservation=*/ nullptr,
                                      &error_msg);
   ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   ASSERT_TRUE(map0.BaseBegin() == valid_address);
   // Map at an unspecified address, which should succeed.
   MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
-                                     /* addr */ nullptr,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
                                      &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -439,7 +437,9 @@
                                      reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
+                                     /*reuse=*/ false,
+                                     /*reservation=*/ nullptr,
                                      &error_msg);
   ASSERT_FALSE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(!error_msg.empty());
@@ -455,6 +455,53 @@
 }
 #endif
 
+TEST_F(MemMapTest, RemapFileViewAtEnd) {
+  CommonInit();
+  std::string error_msg;
+  ScratchFile scratch_file;
+
+  // Create a scratch file 3 pages large.
+  constexpr size_t kMapSize = 3 * kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+  memset(data.get(), 1, kPageSize);
+  memset(&data[0], 0x55, kPageSize);
+  memset(&data[kPageSize], 0x5a, kPageSize);
+  memset(&data[2 * kPageSize], 0xaa, kPageSize);
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               scratch_file.GetFd(),
+                               /*start=*/0,
+                               /*low_4gb=*/true,
+                               scratch_file.GetFilename().c_str(),
+                               &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
+  ASSERT_EQ(data[0], *map.Begin());
+  ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
+  ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
+
+  for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
+    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
+                                 "bad_offset_map",
+                                 PROT_READ,
+                                 MAP_PRIVATE | MAP_FIXED,
+                                 scratch_file.GetFd(),
+                                 offset,
+                                 &error_msg);
+    ASSERT_TRUE(tail.IsValid()) << error_msg;
+    ASSERT_TRUE(error_msg.empty());
+    ASSERT_EQ(offset, map.Size());
+    ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
+    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
+    ASSERT_EQ(data[offset], *tail.Begin());
+  }
+}
+
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
   // Some MIPS32 hardware (namely the Creator Ci20 development board)
   // cannot allocate in the 2GB-4GB region.
@@ -475,7 +522,9 @@
                                reinterpret_cast<uint8_t*>(start_addr),
                                size,
                                PROT_READ | PROT_WRITE,
-                               /*low_4gb*/ true,
+                               /*low_4gb=*/ true,
+                               /*reuse=*/ false,
+                               /*reservation=*/ nullptr,
                                &error_msg);
     if (map.IsValid()) {
       break;
@@ -496,7 +545,9 @@
                                     reinterpret_cast<uint8_t*>(ptr),
                                     2 * kPageSize,  // brings it over the top.
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
+                                    /*reuse=*/ false,
+                                    /*reservation=*/ nullptr,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -511,7 +562,9 @@
                            reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                            kPageSize,
                            PROT_READ | PROT_WRITE,
-                           /* low_4gb */ true,
+                           /*low_4gb=*/ true,
+                           /*reuse=*/ false,
+                           /*reservation=*/ nullptr,
                            &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -521,10 +574,12 @@
   CommonInit();
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
-                                    reinterpret_cast<uint8_t*>(0xF0000000),
-                                    0x20000000,
+                                    /*addr=*/ reinterpret_cast<uint8_t*>(0xF0000000),
+                                    /*byte_count=*/ 0x20000000,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ true,
+                                    /*low_4gb=*/ true,
+                                    /*reuse=*/ false,
+                                    /*reservation=*/ nullptr,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -535,22 +590,19 @@
   CommonInit();
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
-                                    nullptr,
-                                    0x20000,
+                                    /*byte_count=*/ 0x20000,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
-                                    /* reuse */ false,
-                                    /* reservation */ nullptr,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
   ASSERT_TRUE(map.IsValid());
   ASSERT_TRUE(error_msg.empty());
   MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
-                                     reinterpret_cast<uint8_t*>(map.BaseBegin()),
-                                     0x10000,
+                                     /*addr=*/ reinterpret_cast<uint8_t*>(map.BaseBegin()),
+                                     /*byte_count=*/ 0x10000,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
-                                     /* reuse */ true,
-                                     /* reservation */ nullptr,
+                                     /*low_4gb=*/ false,
+                                     /*reuse=*/ true,
+                                     /*reservation=*/ nullptr,
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid());
   ASSERT_TRUE(error_msg.empty());
@@ -561,45 +613,45 @@
   std::string error_msg;
   constexpr size_t kNumPages = 3;
   // Map a 3-page mem map.
-  MemMap map = MemMap::MapAnonymous("MapAnonymous0",
-                                    /* addr */ nullptr,
-                                    kPageSize * kNumPages,
-                                    PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
-                                    &error_msg);
-  ASSERT_TRUE(map.IsValid()) << error_msg;
+  MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
+                                            kPageSize * kNumPages,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ false,
+                                            &error_msg);
+  ASSERT_TRUE(reservation.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   // Record the base address.
-  uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
-  // Unmap it.
-  map.Reset();
+  uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());
 
-  // Map at the same address, but in page-sized separate mem maps,
-  // assuming the space at the address is still available.
+  // Map at the same address, taking memory from the `reservation` mapping.
   MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
-                                     map_base,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
+                                     &reservation,
                                      &error_msg);
   ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map_base, map0.Begin());
   MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
-                                     map_base + kPageSize,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
+                                     &reservation,
                                      &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map_base + kPageSize, map1.Begin());
   MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
-                                     map_base + kPageSize * 2,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
-                                     /* low_4gb */ false,
+                                     /*low_4gb=*/ false,
+                                     &reservation,
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
+  ASSERT_FALSE(reservation.IsValid());  // The entire reservation was used.
 
   // One-map cases.
   ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
@@ -625,10 +677,9 @@
   const size_t page_size = static_cast<size_t>(kPageSize);
   // Map a region.
   MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
-                                   /* addr */ nullptr,
                                    14 * page_size,
                                    PROT_READ | PROT_WRITE,
-                                   /* low_4gb */ false,
+                                   /*low_4gb=*/ false,
                                    &error_msg);
   ASSERT_TRUE(m0.IsValid());
   uint8_t* base0 = m0.Begin();
@@ -731,10 +782,9 @@
   ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
 
   MemMap reservation = MemMap::MapAnonymous("Test reservation",
-                                            /* addr */ nullptr,
                                             kMapSize,
                                             PROT_NONE,
-                                            /* low_4gb */ false,
+                                            /*low_4gb=*/ false,
                                             &error_msg);
   ASSERT_TRUE(reservation.IsValid());
   ASSERT_TRUE(error_msg.empty());
@@ -744,14 +794,14 @@
   static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
   uint8_t* addr1 = reservation.Begin();
   MemMap map1 = MemMap::MapFileAtAddress(addr1,
-                                         /* byte_count */ kChunk1Size,
+                                         /*byte_count=*/ kChunk1Size,
                                          PROT_READ,
                                          MAP_PRIVATE,
                                          scratch_file.GetFd(),
-                                         /* start */ 0,
-                                         /* low_4gb */ false,
+                                         /*start=*/ 0,
+                                         /*low_4gb=*/ false,
                                          scratch_file.GetFilename().c_str(),
-                                         /* reuse */ false,
+                                         /*reuse=*/ false,
                                          &reservation,
                                          &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
@@ -769,10 +819,10 @@
   uint8_t* addr2 = reservation.Begin();
   MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                      addr2,
-                                     /* byte_count */ kChunk2Size,
+                                     /*byte_count=*/ kChunk2Size,
                                      PROT_READ,
-                                     /* low_4gb */ false,
-                                     /* reuse */ false,
+                                     /*low_4gb=*/ false,
+                                     /*reuse=*/ false,
                                      &reservation,
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid()) << error_msg;
@@ -786,14 +836,14 @@
   const size_t kChunk3Size = reservation.Size() - 1u;
   uint8_t* addr3 = reservation.Begin();
   MemMap map3 = MemMap::MapFileAtAddress(addr3,
-                                         /* byte_count */ kChunk3Size,
+                                         /*byte_count=*/ kChunk3Size,
                                          PROT_READ,
                                          MAP_PRIVATE,
                                          scratch_file.GetFd(),
-                                         /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
-                                         /* low_4gb */ false,
+                                         /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
+                                         /*low_4gb=*/ false,
                                          scratch_file.GetFilename().c_str(),
-                                         /* reuse */ false,
+                                         /*reuse=*/ false,
                                          &reservation,
                                          &error_msg);
   ASSERT_TRUE(map3.IsValid()) << error_msg;
@@ -827,3 +877,31 @@
 }
 
 }  // namespace art
+
+namespace {
+
+class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
+  void OnTestPartResult(const testing::TestPartResult& result) override {
+    switch (result.type()) {
+      case testing::TestPartResult::kFatalFailure:
+        art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
+        break;
+
+      // TODO: Could consider logging on EXPECT failures.
+      case testing::TestPartResult::kNonFatalFailure:
+      case testing::TestPartResult::kSkip:
+      case testing::TestPartResult::kSuccess:
+        break;
+    }
+  }
+};
+
+}  // namespace
+
+// Inject our listener into the test runner.
+extern "C"
+__attribute__((visibility("default"))) __attribute__((used))
+void ArtTestGlobalInit() {
+  LOG(ERROR) << "Installing listener";
+  testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
+}
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
index 601b049..ac854df 100644
--- a/libartbase/base/mem_map_unix.cc
+++ b/libartbase/base/mem_map_unix.cc
@@ -16,7 +16,7 @@
 
 #include "mem_map.h"
 
-#include <sys/mman.h>
+#include "mman.h"
 
 namespace art {
 
diff --git a/libartbase/base/mem_map_windows.cc b/libartbase/base/mem_map_windows.cc
new file mode 100644
index 0000000..84e14ea
--- /dev/null
+++ b/libartbase/base/mem_map_windows.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+
+#include <windows.h>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+#include "android-base/mapped_file.h"
+#ifdef PROT_READ
+#undef PROT_READ
+#endif
+#ifdef PROT_WRITE
+#undef PROT_WRITE
+#endif
+#include "mman.h"
+
+namespace art {
+
+using android::base::MappedFile;
+using android::base::StringPrintf;
+
+static off_t allocation_granularity;
+
+void MemMap::TargetMMapInit() {
+  SYSTEM_INFO si;
+  GetSystemInfo(&si);
+  allocation_granularity = si.dwAllocationGranularity;
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+  UNUSED(start);
+  size_t padding = fd_off % allocation_granularity;
+  off_t file_offset = fd_off - padding;
+  off_t map_length = len + padding;
+
+  // Only read and write permissions are supported.
+  if ((prot != PROT_READ) && (prot != (PROT_READ | PROT_WRITE))) {
+    PLOG(ERROR) << "Protection or flag error was not supported.";
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+  // Fixed is not currently supported either.
+  // TODO(sehr): add MAP_FIXED support.
+  if ((flags & MAP_FIXED) != 0) {
+    PLOG(ERROR) << "MAP_FIXED not supported.";
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+
+  // Compute the Windows access flags for the two APIs from the PROTs and MAPs.
+  DWORD map_access = 0;
+  DWORD view_access = 0;
+  if ((prot & PROT_WRITE) != 0) {
+    map_access = PAGE_READWRITE;
+    if (((flags & MAP_SHARED) != 0) && ((flags & MAP_PRIVATE) == 0)) {
+      view_access = FILE_MAP_ALL_ACCESS;
+    } else if (((flags & MAP_SHARED) == 0) && ((flags & MAP_PRIVATE) != 0)) {
+      view_access = FILE_MAP_COPY | FILE_MAP_READ;
+    } else {
+      PLOG(ERROR) << "MAP_PRIVATE and MAP_SHARED inconsistently set.";
+      errno = EINVAL;
+      return MAP_FAILED;
+    }
+  } else {
+    map_access = PAGE_READONLY;
+    view_access = FILE_MAP_READ;
+  }
+
+  // MapViewOfFile does not like to see a size greater than the file size of the
+  // underlying file object, unless the underlying file object is writable.  If
+  // the mapped region would go beyond the end of the underlying file, use zero,
+  // as this indicates the physical size.
+  HANDLE file_handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  LARGE_INTEGER file_length;
+  if (!::GetFileSizeEx(file_handle, &file_length)) {
+    PLOG(ERROR) << "Couldn't get file size.";
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+  if (((map_access & PAGE_READONLY) != 0) &&
+      file_offset + map_length > file_length.QuadPart) {
+    map_length = 0;
+  }
+
+  // Create a file mapping object that will be used to access the file.
+  HANDLE handle = ::CreateFileMapping(reinterpret_cast<HANDLE>(_get_osfhandle(fd)),
+                                      nullptr,
+                                      map_access,
+                                      0,
+                                      0,
+                                      nullptr);
+  if (handle == nullptr) {
+    DWORD error = ::GetLastError();
+    PLOG(ERROR) << StringPrintf("Couldn't create file mapping %lx.", error);
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+
+  // Map the file into the process address space.
+  DWORD offset_low = static_cast<DWORD>(file_offset & 0xffffffffU);
+#ifdef _WIN64
+  DWORD offset_high = static_cast<DWORD>(file_offset >> 32);
+#else
+  DWORD offset_high = static_cast<DWORD>(0);
+#endif
+  void* view_address = MapViewOfFile(handle, view_access, offset_high, offset_low, map_length);
+  if (view_address == nullptr) {
+    DWORD error = ::GetLastError();
+    PLOG(ERROR) << StringPrintf("Couldn't create file view %lx.", error);
+    ::CloseHandle(handle);
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+
+  return view_address;
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+  // TODO(sehr): implement unmap.
+  UNUSED(start);
+  UNUSED(len);
+  return 0;
+}
+
+}  // namespace art
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
new file mode 100644
index 0000000..abb36bc
--- /dev/null
+++ b/libartbase/base/membarrier.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "membarrier.h"
+
+#include <errno.h>
+
+#if !defined(_WIN32)
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+#include "macros.h"
+
+#if defined(__BIONIC__)
+
+#include <atomic>
+#include <linux/membarrier.h>
+
+#define CHECK_MEMBARRIER_CMD(art_value, membarrier_value) \
+  static_assert(static_cast<int>(art_value) == (membarrier_value), "Bad value for " # art_value)
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kQuery, MEMBARRIER_CMD_QUERY);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kGlobal, MEMBARRIER_CMD_SHARED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kRegisterPrivateExpedited,
+                     MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
+#undef CHECK_MEMBARRIER_CMD
+
+#endif  // __BIONIC__
+
+namespace art {
+
+#if defined(__NR_membarrier)
+
+int membarrier(MembarrierCommand command) {
+#if defined(__BIONIC__)
+  // Avoid calling membarrier on older Android versions where membarrier may be barred by seccomp
+  // causing the current process to be killed. The probing here could be considered expensive so
+  // endeavour not to repeat too often.
+  static int api_level = android_get_device_api_level();
+  if (api_level < __ANDROID_API_Q__) {
+    errno = ENOSYS;
+    return -1;
+  }
+#endif  // __BIONIC__
+  return syscall(__NR_membarrier, static_cast<int>(command), 0);
+}
+
+#else  // __NR_membarrier
+
+int membarrier(MembarrierCommand command ATTRIBUTE_UNUSED) {
+  // In principle this could be supported on linux, but Android's prebuilt glibc does not include
+  // the system call number definitions (b/111199492).
+  errno = ENOSYS;
+  return -1;
+}
+
+#endif  // __NR_membarrier
+
+}  // namespace art
diff --git a/libartbase/base/membarrier.h b/libartbase/base/membarrier.h
new file mode 100644
index 0000000..f829fc1f
--- /dev/null
+++ b/libartbase/base/membarrier.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MEMBARRIER_H_
+#define ART_LIBARTBASE_BASE_MEMBARRIER_H_
+
+namespace art {
+  // Command types for the linux membarrier system call. Different Linux installations may include
+  // different subsets of these commands (at the same codepoints).
+  //
+  // Hardcoding these values is temporary until bionic and prebuilts glibc have an up to date
+  // linux/membarrier.h. The order and values follow the current linux definitions.
+  enum class MembarrierCommand : int  {
+    // MEMBARRIER_CMD_QUERY
+    kQuery = 0,
+    // MEMBARRIER_CMD_GLOBAL
+    kGlobal = (1 << 0),
+    // MEMBARRIER_CMD_GLOBAL_EXPEDITED
+    kGlobalExpedited = (1 << 1),
+    // MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED
+    kRegisterGlobalExpedited = (1 << 2),
+    // MEMBARRIER_CMD_PRIVATE_EXPEDITED
+    kPrivateExpedited = (1 << 3),
+    // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED
+    kRegisterPrivateExpedited = (1 << 4),
+    // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
+    kPrivateExpeditedSyncCore = (1 << 5),
+    // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
+    kRegisterPrivateExpeditedSyncCore = (1 << 6)
+  };
+
+  // Call membarrier(2) if available on platform and return result. This method can fail if the
+  // command is not supported by the kernel. The underlying system call is linux specific.
+  int membarrier(MembarrierCommand command);
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_MEMBARRIER_H_
diff --git a/libartbase/base/membarrier_test.cc b/libartbase/base/membarrier_test.cc
new file mode 100644
index 0000000..3eedf14
--- /dev/null
+++ b/libartbase/base/membarrier_test.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "membarrier.h"
+
+class ScopedErrnoCleaner {
+ public:
+  ScopedErrnoCleaner() { errno = 0; }
+  ~ScopedErrnoCleaner() { errno = 0; }
+};
+
+bool HasMembarrier(art::MembarrierCommand cmd) {
+  ScopedErrnoCleaner errno_cleaner;
+  int supported_cmds = art::membarrier(art::MembarrierCommand::kQuery);
+  return (supported_cmds > 0) && ((supported_cmds & static_cast<int>(cmd)) != 0);
+}
+
+TEST(membarrier, query) {
+  ScopedErrnoCleaner errno_cleaner;
+  int supported = art::membarrier(art::MembarrierCommand::kQuery);
+  if (errno == 0) {
+    ASSERT_LE(0, supported);
+  } else {
+    ASSERT_TRUE(errno == ENOSYS && supported == -1);
+  }
+}
+
+TEST(membarrier, global_barrier) {
+  if (!HasMembarrier(art::MembarrierCommand::kGlobal)) {
+    GTEST_LOG_(INFO) << "MembarrierCommand::kGlobal not supported, skipping test.";
+    return;
+  }
+  ASSERT_EQ(0, art::membarrier(art::MembarrierCommand::kGlobal));
+}
+
+static const char* MembarrierCommandToName(art::MembarrierCommand cmd) {
+#define CASE_VALUE(x) case (x): return #x;
+  switch (cmd) {
+    CASE_VALUE(art::MembarrierCommand::kQuery);
+    CASE_VALUE(art::MembarrierCommand::kGlobal);
+    CASE_VALUE(art::MembarrierCommand::kGlobalExpedited);
+    CASE_VALUE(art::MembarrierCommand::kRegisterGlobalExpedited);
+    CASE_VALUE(art::MembarrierCommand::kPrivateExpedited);
+    CASE_VALUE(art::MembarrierCommand::kRegisterPrivateExpedited);
+    CASE_VALUE(art::MembarrierCommand::kPrivateExpeditedSyncCore);
+    CASE_VALUE(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore);
+  }
+}
+
+static void TestRegisterAndBarrierCommands(art::MembarrierCommand membarrier_cmd_register,
+                                           art::MembarrierCommand membarrier_cmd_barrier) {
+  if (!HasMembarrier(membarrier_cmd_register)) {
+    GTEST_LOG_(INFO) << MembarrierCommandToName(membarrier_cmd_register)
+        << " not supported, skipping test.";
+    return;
+  }
+  if (!HasMembarrier(membarrier_cmd_barrier)) {
+    GTEST_LOG_(INFO) << MembarrierCommandToName(membarrier_cmd_barrier)
+        << " not supported, skipping test.";
+    return;
+  }
+
+  ScopedErrnoCleaner errno_cleaner;
+
+  // Check barrier use without prior registration.
+  if (membarrier_cmd_register == art::MembarrierCommand::kRegisterGlobalExpedited) {
+    // Global barrier use is always okay.
+    ASSERT_EQ(0, art::membarrier(membarrier_cmd_barrier));
+  } else {
+    // Private barrier should fail.
+    ASSERT_EQ(-1, art::membarrier(membarrier_cmd_barrier));
+    ASSERT_EQ(EPERM, errno);
+    errno = 0;
+  }
+
+  // Check registration for barrier succeeds.
+  ASSERT_EQ(0, art::membarrier(membarrier_cmd_register));
+
+  // Check barrier use after registration succeeds.
+  ASSERT_EQ(0, art::membarrier(membarrier_cmd_barrier));
+}
+
+TEST(membarrier, global_expedited) {
+  TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterGlobalExpedited,
+                                 art::MembarrierCommand::kGlobalExpedited);
+}
+
+TEST(membarrier, private_expedited) {
+  TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterPrivateExpedited,
+                                 art::MembarrierCommand::kPrivateExpedited);
+}
+
+TEST(membarrier, private_expedited_sync_core) {
+  TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore,
+                                 art::MembarrierCommand::kPrivateExpeditedSyncCore);
+}
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
new file mode 100644
index 0000000..780be32
--- /dev/null
+++ b/libartbase/base/memfd.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "memfd.h"
+
+#include <errno.h>
+#include <stdio.h>
+#if !defined(_WIN32)
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+#endif
+
+#include "macros.h"
+
+// When building for linux host, glibc in prebuilts does not include memfd_create system call
+// number. As a temporary testing measure, we add the definition here.
+#if defined(__linux__) && !defined(__NR_memfd_create)
+#if defined(__x86_64__)
+#define __NR_memfd_create 319
+#elif defined(__i386__)
+#define __NR_memfd_create 356
+#endif  // defined(__i386__)
+#endif  // defined(__linux__) && !defined(__NR_memfd_create)
+
+namespace art {
+
+#if defined(__NR_memfd_create)
+
+int memfd_create(const char* name, unsigned int flags) {
+  // Check kernel version supports memfd_create(). Some older kernels segfault executing
+  // memfd_create() rather than returning ENOSYS (b/116769556).
+  static constexpr int kRequiredMajor = 3;
+  static constexpr int kRequiredMinor = 17;
+  struct utsname uts;
+  int major, minor;
+  if (uname(&uts) != 0 ||
+      strcmp(uts.sysname, "Linux") != 0 ||
+      sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
+      (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
+    errno = ENOSYS;
+    return -1;
+  }
+
+  return syscall(__NR_memfd_create, name, flags);
+}
+
+#else  // __NR_memfd_create
+
+int memfd_create(const char* name ATTRIBUTE_UNUSED, unsigned int flags ATTRIBUTE_UNUSED) {
+  errno = ENOSYS;
+  return -1;
+}
+
+#endif  // __NR_memfd_create
+
+}  // namespace art
diff --git a/libartbase/base/memfd.h b/libartbase/base/memfd.h
new file mode 100644
index 0000000..91db0b2
--- /dev/null
+++ b/libartbase/base/memfd.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MEMFD_H_
+#define ART_LIBARTBASE_BASE_MEMFD_H_
+
+namespace art {
+
+// Call memfd_create(2) if available on platform and return result. This call also makes a kernel
+// version check for safety on older kernels (b/116769556).
+int memfd_create(const char* name, unsigned int flags);
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_MEMFD_H_
diff --git a/libartbase/base/memfd_test.cc b/libartbase/base/memfd_test.cc
new file mode 100644
index 0000000..1edf3a1
--- /dev/null
+++ b/libartbase/base/memfd_test.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "memfd.h"
+
+TEST(memfd, basic) {
+  errno = 0;
+  int fd = art::memfd_create("memfd_create_test", 0);
+  if (fd < 0) {
+    ASSERT_EQ(ENOSYS, errno);
+    GTEST_LOG_(INFO) << "memfd_create not supported, skipping test.";
+    return;
+  }
+  ASSERT_TRUE(close(fd) == 0 || errno != EBADF);
+}
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index d381f01..1a6a9bb 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -44,7 +44,7 @@
 
 extern "C" void __asan_handle_no_return();
 
-# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address, noinline))
 # define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
 constexpr bool kRunningOnMemoryTool = true;
 constexpr bool kMemoryToolDetectsLeaks = true;
diff --git a/libartbase/base/mman.h b/libartbase/base/mman.h
new file mode 100644
index 0000000..b56edfc
--- /dev/null
+++ b/libartbase/base/mman.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MMAN_H_
+#define ART_LIBARTBASE_BASE_MMAN_H_
+
+#ifdef _WIN32
+
+// There is no sys/mman.h in mingw.
+
+#define PROT_READ      0x1
+#define PROT_WRITE     0x2
+#define PROT_EXEC      0x4
+#define PROT_NONE      0x0
+
+#define MAP_SHARED     0x01
+#define MAP_PRIVATE    0x02
+
+#define MAP_FAILED     ((void*) -1)
+#define MAP_FIXED      0x10
+#define MAP_ANONYMOUS  0x20
+
+#else
+
+#include <sys/mman.h>
+
+#endif
+
+
+#endif  // ART_LIBARTBASE_BASE_MMAN_H_
diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc
index f8b31cf..a00779e 100644
--- a/libartbase/base/os_linux.cc
+++ b/libartbase/base/os_linux.cc
@@ -50,7 +50,12 @@
 }
 
 File* OS::CreateEmptyFileWriteOnly(const char* name) {
-  return art::CreateEmptyFile(name, O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
+#ifdef _WIN32
+  int flags = O_WRONLY | O_TRUNC;
+#else
+  int flags = O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC;
+#endif
+  return art::CreateEmptyFile(name, flags);
 }
 
 File* OS::OpenFileWithFlags(const char* name, int flags, bool auto_flush) {
diff --git a/libartbase/base/safe_copy.cc b/libartbase/base/safe_copy.cc
index b46b921..ad75aa7 100644
--- a/libartbase/base/safe_copy.cc
+++ b/libartbase/base/safe_copy.cc
@@ -16,8 +16,10 @@
 
 #include "safe_copy.h"
 
+#ifdef __linux__
 #include <sys/uio.h>
 #include <sys/user.h>
+#endif
 #include <unistd.h>
 
 #include <algorithm>
diff --git a/libartbase/base/safe_copy_test.cc b/libartbase/base/safe_copy_test.cc
index c23651f..9f7d409 100644
--- a/libartbase/base/safe_copy_test.cc
+++ b/libartbase/base/safe_copy_test.cc
@@ -18,12 +18,12 @@
 
 #include <errno.h>
 #include <string.h>
-#include <sys/mman.h>
 #include <sys/user.h>
 
 #include "android-base/logging.h"
 #include "globals.h"
 #include "gtest/gtest.h"
+#include "mman.h"
 
 
 namespace art {
diff --git a/libartbase/base/scoped_arena_allocator.cc b/libartbase/base/scoped_arena_allocator.cc
index ab05c60..a54f350 100644
--- a/libartbase/base/scoped_arena_allocator.cc
+++ b/libartbase/base/scoped_arena_allocator.cc
@@ -106,7 +106,7 @@
   return ptr;
 }
 
-ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other)
+ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept
     : DebugStackReference(std::move(other)),
       DebugStackRefCounter(),
       ArenaAllocatorStats(other),
diff --git a/libartbase/base/scoped_arena_allocator.h b/libartbase/base/scoped_arena_allocator.h
index 7eaec5e..52d0361 100644
--- a/libartbase/base/scoped_arena_allocator.h
+++ b/libartbase/base/scoped_arena_allocator.h
@@ -138,7 +138,7 @@
 class ScopedArenaAllocator
     : private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
  public:
-  ScopedArenaAllocator(ScopedArenaAllocator&& other);
+  ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept;
   explicit ScopedArenaAllocator(ArenaStack* arena_stack);
   ~ScopedArenaAllocator();
 
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index d679328..b16a45a 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 
+#include "file_utils.h"
 #include "unix_file/fd_file.h"
 
 namespace art {
@@ -34,13 +35,21 @@
 
 /* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
                                           std::string* error_msg) {
+#ifdef _WIN32
+  // TODO: implement file locking for Windows.
+  UNUSED(filename);
+  UNUSED(flags);
+  UNUSED(block);
+  *error_msg = "flock is unsupported on Windows";
+  return nullptr;
+#else
   while (true) {
     // NOTE: We don't check usage here because the ScopedFlock should *never* be
     // responsible for flushing its underlying FD. Its only purpose should be
     // to acquire a lock, and the unlock / close in the corresponding
     // destructor. Callers should explicitly flush files they're writing to if
     // that is the desired behaviour.
-    std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+    std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, /* auto_flush= */ false));
     if (file.get() == nullptr) {
       *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
       return nullptr;
@@ -88,17 +97,26 @@
 
     return ScopedFlock(new LockedFile(std::move((*file.get()))));
   }
+#endif
 }
 
 ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
                               const bool read_only_mode, std::string* error_msg) {
+#ifdef _WIN32
+  // TODO: implement file locking for Windows.
+  UNUSED(fd);
+  UNUSED(path);
+  UNUSED(read_only_mode);
+  *error_msg = "flock is unsupported on Windows.";
+  return nullptr;
+#else
   // NOTE: We don't check usage here because the ScopedFlock should *never* be
   // responsible for flushing its underlying FD. Its only purpose should be
   // to acquire a lock, and the unlock / close in the corresponding
   // destructor. Callers should explicitly flush files they're writing to if
   // that is the desired behaviour.
   ScopedFlock locked_file(
-      new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+      new LockedFile(DupCloexec(fd), path, /* check_usage= */ false, read_only_mode));
   if (locked_file->Fd() == -1) {
     *error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
                               locked_file->GetPath().c_str(), strerror(errno));
@@ -111,9 +129,11 @@
   }
 
   return locked_file;
+#endif
 }
 
 void LockedFile::ReleaseLock() {
+#ifndef _WIN32
   if (this->Fd() != -1) {
     int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
     if (flock_result != 0) {
@@ -125,6 +145,7 @@
       PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
     }
   }
+#endif
 }
 
 }  // namespace art
diff --git a/libartbase/base/scoped_flock_test.cc b/libartbase/base/scoped_flock_test.cc
index f9ac1e0..22356cd 100644
--- a/libartbase/base/scoped_flock_test.cc
+++ b/libartbase/base/scoped_flock_test.cc
@@ -38,7 +38,7 @@
     // Attempt to acquire a second lock on the same file. This must fail.
     ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
                                                O_RDONLY,
-                                               /* block */ false,
+                                               /* block= */ false,
                                                &error_msg);
     ASSERT_TRUE(second_lock.get() == nullptr);
     ASSERT_TRUE(!error_msg.empty());
diff --git a/libartbase/base/sdk_version.h b/libartbase/base/sdk_version.h
new file mode 100644
index 0000000..e2dbc50
--- /dev/null
+++ b/libartbase/base/sdk_version.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_SDK_VERSION_H_
+#define ART_LIBARTBASE_BASE_SDK_VERSION_H_
+
+#include <cstdint>
+#include <limits>
+
+namespace art {
+
+enum class SdkVersion : uint32_t {
+  kMin   =  0u,
+  kUnset =  0u,
+  kL     = 21u,
+  kL_MR1 = 22u,
+  kM     = 23u,
+  kN     = 24u,
+  kN_MR1 = 25u,
+  kO     = 26u,
+  kO_MR1 = 27u,
+  kP     = 28u,
+  kMax   = std::numeric_limits<uint32_t>::max(),
+};
+
+inline bool IsSdkVersionSetAndMoreThan(uint32_t lhs, SdkVersion rhs) {
+  return lhs != static_cast<uint32_t>(SdkVersion::kUnset) && lhs > static_cast<uint32_t>(rhs);
+}
+
+inline bool IsSdkVersionSetAndAtLeast(uint32_t lhs, SdkVersion rhs) {
+  return lhs != static_cast<uint32_t>(SdkVersion::kUnset) && lhs >= static_cast<uint32_t>(rhs);
+}
+
+inline bool IsSdkVersionSetAndAtMost(uint32_t lhs, SdkVersion rhs) {
+  return lhs != static_cast<uint32_t>(SdkVersion::kUnset) && lhs <= static_cast<uint32_t>(rhs);
+}
+
+inline bool IsSdkVersionSetAndLessThan(uint32_t lhs, SdkVersion rhs) {
+  return lhs != static_cast<uint32_t>(SdkVersion::kUnset) && lhs < static_cast<uint32_t>(rhs);
+}
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_SDK_VERSION_H_
diff --git a/libartbase/base/socket_peer_is_trusted.cc b/libartbase/base/socket_peer_is_trusted.cc
new file mode 100644
index 0000000..3996d90
--- /dev/null
+++ b/libartbase/base/socket_peer_is_trusted.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "socket_peer_is_trusted.h"
+
+#if !defined(_WIN32)
+#include <pwd.h>
+#include <sys/socket.h>
+#endif
+
+#include <android-base/logging.h>
+
+namespace art {
+
+// Returns true if the user on the other end of the socket is root or shell.
+#ifdef ART_TARGET_ANDROID
+bool SocketPeerIsTrusted(int fd) {
+  ucred cr;
+  socklen_t cr_length = sizeof(cr);
+  if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &cr_length) != 0) {
+    PLOG(ERROR) << "couldn't get socket credentials";
+    return false;
+  }
+
+  passwd* shell = getpwnam("shell");
+  if (cr.uid != 0 && cr.uid != shell->pw_uid) {
+    LOG(ERROR) << "untrusted uid " << cr.uid << " on other end of socket";
+    return false;
+  }
+
+  return true;
+}
+#else
+bool SocketPeerIsTrusted(int /* fd */) {
+  return true;
+}
+#endif
+
+}  // namespace art
diff --git a/libartbase/base/socket_peer_is_trusted.h b/libartbase/base/socket_peer_is_trusted.h
new file mode 100644
index 0000000..4bbadd4
--- /dev/null
+++ b/libartbase/base/socket_peer_is_trusted.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_SOCKET_PEER_IS_TRUSTED_H_
+#define ART_LIBARTBASE_BASE_SOCKET_PEER_IS_TRUSTED_H_
+
+namespace art {
+
+// Returns true if the user on the other end of the socket is root or shell.
+bool SocketPeerIsTrusted(int fd);
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_SOCKET_PEER_IS_TRUSTED_H_
diff --git a/libartbase/base/systrace.h b/libartbase/base/systrace.h
index d995dce..30bba49 100644
--- a/libartbase/base/systrace.h
+++ b/libartbase/base/systrace.h
@@ -17,33 +17,52 @@
 #ifndef ART_LIBARTBASE_BASE_SYSTRACE_H_
 #define ART_LIBARTBASE_BASE_SYSTRACE_H_
 
-#define ATRACE_TAG ATRACE_TAG_DALVIK
-#include <cutils/trace.h>
-
 #include <sstream>
 #include <string>
 
 #include "android-base/stringprintf.h"
 #include "macros.h"
+#include "palette/palette.h"
 
 namespace art {
 
+inline bool ATraceEnabled() {
+  int enabled = 0;
+  if (UNLIKELY(PaletteTraceEnabled(&enabled) == PaletteStatus::kOkay && enabled != 0)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+inline void ATraceBegin(const char* name) {
+  PaletteTraceBegin(name);
+}
+
+inline void ATraceEnd() {
+  PaletteTraceEnd();
+}
+
+inline void ATraceIntegerValue(const char* name, int32_t value) {
+  PaletteTraceIntegerValue(name, value);
+}
+
 class ScopedTrace {
  public:
   explicit ScopedTrace(const char* name) {
-    ATRACE_BEGIN(name);
+    ATraceBegin(name);
   }
   template <typename Fn>
   explicit ScopedTrace(Fn fn) {
-    if (ATRACE_ENABLED()) {
-      ATRACE_BEGIN(fn().c_str());
+    if (UNLIKELY(ATraceEnabled())) {
+      ATraceBegin(fn().c_str());
     }
   }
 
   explicit ScopedTrace(const std::string& name) : ScopedTrace(name.c_str()) {}
 
   ~ScopedTrace() {
-    ATRACE_END();
+    ATraceEnd();
   }
 };
 
@@ -54,7 +73,7 @@
   }
 
   ~ScopedTraceNoStart() {
-    ATRACE_END();
+    ATraceEnd();
   }
 
   // Message helper for the macro. Do not use directly.
@@ -63,7 +82,7 @@
     ScopedTraceMessageHelper() {
     }
     ~ScopedTraceMessageHelper() {
-      ATRACE_BEGIN(buffer_.str().c_str());
+      ATraceBegin(buffer_.str().c_str());
     }
 
     std::ostream& stream() {
@@ -77,7 +96,7 @@
 
 #define SCOPED_TRACE \
   ::art::ScopedTraceNoStart APPEND_TOKENS_AFTER_EVAL(trace, __LINE__) ; \
-  (ATRACE_ENABLED()) && ::art::ScopedTraceNoStart::ScopedTraceMessageHelper().stream()
+  (ATraceEnabled()) && ::art::ScopedTraceNoStart::ScopedTraceMessageHelper().stream()
 
 }  // namespace art
 
diff --git a/libartbase/base/time_utils.cc b/libartbase/base/time_utils.cc
index 89a1109..aa6c987 100644
--- a/libartbase/base/time_utils.cc
+++ b/libartbase/base/time_utils.cc
@@ -14,12 +14,14 @@
  * limitations under the License.
  */
 
+#include "time_utils.h"
+
 #include <inttypes.h>
+#include <stdio.h>
+
 #include <limits>
 #include <sstream>
 
-#include "time_utils.h"
-
 #include "android-base/stringprintf.h"
 
 #include "logging.h"
@@ -30,6 +32,20 @@
 
 namespace art {
 
+namespace {
+
+#if !defined(__linux__)
+int GetTimeOfDay(struct timeval* tv, struct timezone* tz) {
+#ifdef _WIN32
+  return mingw_gettimeofday(tv, tz);
+#else
+  return gettimeofday(tv, tz);
+#endif
+}
+#endif
+
+}  // namespace
+
 using android::base::StringPrintf;
 
 std::string PrettyDuration(uint64_t nano_duration, size_t max_fraction_digits) {
@@ -117,7 +133,12 @@
 std::string GetIsoDate() {
   time_t now = time(nullptr);
   tm tmbuf;
+#ifdef _WIN32
+  localtime_s(&tmbuf, &now);
+  tm* ptm = &tmbuf;
+#else
   tm* ptm = localtime_r(&now, &tmbuf);
+#endif
   return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
       ptm->tm_year + 1900, ptm->tm_mon+1, ptm->tm_mday,
       ptm->tm_hour, ptm->tm_min, ptm->tm_sec);
@@ -128,9 +149,9 @@
   timespec now;
   clock_gettime(CLOCK_MONOTONIC, &now);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
-#else  // __APPLE__
+#else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
 #endif
 }
@@ -140,9 +161,9 @@
   timespec now;
   clock_gettime(CLOCK_MONOTONIC, &now);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
-#else  // __APPLE__
+#else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
 #endif
 }
@@ -152,9 +173,9 @@
   timespec now;
   clock_gettime(CLOCK_MONOTONIC, &now);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else  // __APPLE__
+#else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
 #endif
 }
@@ -164,7 +185,7 @@
   timespec now;
   clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else  // __APPLE__
+#else
   UNIMPLEMENTED(WARNING);
   return -1;
 #endif
@@ -176,8 +197,13 @@
   clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
 #else
-  UNIMPLEMENTED(WARNING);
-  return -1;
+  // We cannot use clock_gettime() here. Return the process wall clock time
+  // (using art::NanoTime, which relies on gettimeofday()) as approximation of
+  // the process CPU time instead.
+  //
+  // Note: clock_gettime() is available from macOS 10.12 (Darwin 16), but we try
+  // to keep things simple here.
+  return NanoTime();
 #endif
 }
 
@@ -190,12 +216,12 @@
 
 void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
   if (absolute) {
-#if !defined(__APPLE__)
+#if defined(__linux__)
     clock_gettime(clock, ts);
 #else
     UNUSED(clock);
     timeval tv;
-    gettimeofday(&tv, nullptr);
+    GetTimeOfDay(&tv, nullptr);
     ts->tv_sec = tv.tv_sec;
     ts->tv_nsec = tv.tv_usec * 1000;
 #endif
diff --git a/libartbase/base/time_utils.h b/libartbase/base/time_utils.h
index 431d3e1..15805f3 100644
--- a/libartbase/base/time_utils.h
+++ b/libartbase/base/time_utils.h
@@ -18,6 +18,7 @@
 #define ART_LIBARTBASE_BASE_TIME_UTILS_H_
 
 #include <stdint.h>
+#include <stdio.h>  // Needed for correct _WIN32 build.
 #include <time.h>
 
 #include <string>
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index d715670..8831b9c 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -25,8 +25,13 @@
 #include <android/fdsan.h>
 #endif
 
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
 #include <limits>
 
+#include <android-base/file.h>
 #include <android-base/logging.h>
 
 // Includes needed for FdFile::Copy().
@@ -40,6 +45,96 @@
 
 namespace unix_file {
 
+#if defined(_WIN32)
+// RAII wrapper for an event object to allow asynchronous I/O to correctly signal completion.
+class ScopedEvent {
+ public:
+  ScopedEvent() {
+    handle_ = CreateEventA(/*lpEventAttributes*/ nullptr,
+                           /*bManualReset*/ true,
+                           /*bInitialState*/ false,
+                           /*lpName*/ nullptr);
+  }
+
+  ~ScopedEvent() { CloseHandle(handle_); }
+
+  HANDLE handle() { return handle_; }
+
+ private:
+  HANDLE handle_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedEvent);
+};
+
+// Windows implementation of pread/pwrite. Note that these DO move the file descriptor's read/write
+// position, but do so atomically.
+static ssize_t pread(int fd, void* data, size_t byte_count, off64_t offset) {
+  ScopedEvent event;
+  if (event.handle() == INVALID_HANDLE_VALUE) {
+    PLOG(ERROR) << "Could not create event handle.";
+    errno = EIO;
+    return static_cast<ssize_t>(-1);
+  }
+
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  DWORD bytes_read = 0;
+  OVERLAPPED overlapped = {};
+  overlapped.Offset = static_cast<DWORD>(offset);
+  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+  overlapped.hEvent = event.handle();
+  if (!ReadFile(handle, data, static_cast<DWORD>(byte_count), &bytes_read, &overlapped)) {
+    // If the read failed with other than ERROR_IO_PENDING, return an error.
+    // ERROR_IO_PENDING signals the read was begun asynchronously.
+    // Block until the asynchronous operation has finished or fails, and return
+    // result accordingly.
+    if (::GetLastError() != ERROR_IO_PENDING ||
+        !::GetOverlappedResult(handle, &overlapped, &bytes_read, TRUE)) {
+      // In case someone tries to read errno (since this is masquerading as a POSIX call).
+      errno = EIO;
+      return static_cast<ssize_t>(-1);
+    }
+  }
+  return static_cast<ssize_t>(bytes_read);
+}
+
+static ssize_t pwrite(int fd, const void* buf, size_t count, off64_t offset) {
+  ScopedEvent event;
+  if (event.handle() == INVALID_HANDLE_VALUE) {
+    PLOG(ERROR) << "Could not create event handle.";
+    errno = EIO;
+    return static_cast<ssize_t>(-1);
+  }
+
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  DWORD bytes_written = 0;
+  OVERLAPPED overlapped = {};
+  overlapped.Offset = static_cast<DWORD>(offset);
+  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+  overlapped.hEvent = event.handle();
+  if (!::WriteFile(handle, buf, count, &bytes_written, &overlapped)) {
+    // If the write failed with other than ERROR_IO_PENDING, return an error.
+    // ERROR_IO_PENDING signals the write was begun asynchronously.
+    // Block until the asynchronous operation has finished or fails, and return
+    // result accordingly.
+    if (::GetLastError() != ERROR_IO_PENDING ||
+        !::GetOverlappedResult(handle, &overlapped, &bytes_written, TRUE)) {
+      // In case someone tries to read errno (since this is masquerading as a POSIX call).
+      errno = EIO;
+      return static_cast<ssize_t>(-1);
+    }
+  }
+  return static_cast<ssize_t>(bytes_written);
+}
+
+static int fsync(int fd) {
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  if (handle != INVALID_HANDLE_VALUE && ::FlushFileBuffers(handle)) {
+    return 0;
+  }
+  errno = EINVAL;
+  return -1;
+}
+#endif
+
 #if defined(__BIONIC__)
 static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
   return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
@@ -91,7 +186,7 @@
   }
 }
 
-FdFile::FdFile(FdFile&& other)
+FdFile::FdFile(FdFile&& other) noexcept
     : guard_state_(other.guard_state_),
       fd_(other.fd_),
       file_path_(std::move(other.file_path_)),
@@ -105,7 +200,7 @@
   other.fd_ = -1;
 }
 
-FdFile& FdFile::operator=(FdFile&& other) {
+FdFile& FdFile::operator=(FdFile&& other) noexcept {
   if (this == &other) {
     return *this;
   }
@@ -431,7 +526,7 @@
   bool is_current = false;
   {
     struct stat this_stat, current_stat;
-    int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY));
+    int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY | O_CLOEXEC));
     if (cur_fd > 0) {
       // File still exists.
       if (fstat(fd_, &this_stat) == 0 && fstat(cur_fd, &current_stat) == 0) {
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index e362ed1..f5aa2a5 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -37,19 +37,19 @@
   FdFile() = default;
   // Creates an FdFile using the given file descriptor.
   // Takes ownership of the file descriptor.
-  FdFile(int fd, bool checkUsage);
-  FdFile(int fd, const std::string& path, bool checkUsage);
-  FdFile(int fd, const std::string& path, bool checkUsage, bool read_only_mode);
+  FdFile(int fd, bool check_usage);
+  FdFile(int fd, const std::string& path, bool check_usage);
+  FdFile(int fd, const std::string& path, bool check_usage, bool read_only_mode);
 
-  FdFile(const std::string& path, int flags, bool checkUsage)
-      : FdFile(path, flags, 0640, checkUsage) {}
-  FdFile(const std::string& path, int flags, mode_t mode, bool checkUsage);
+  FdFile(const std::string& path, int flags, bool check_usage)
+      : FdFile(path, flags, 0640, check_usage) {}
+  FdFile(const std::string& path, int flags, mode_t mode, bool check_usage);
 
   // Move constructor.
-  FdFile(FdFile&& other);
+  FdFile(FdFile&& other) noexcept;
 
   // Move assignment operator.
-  FdFile& operator=(FdFile&& other);
+  FdFile& operator=(FdFile&& other) noexcept;
 
   // Release the file descriptor. This will make further accesses to this FdFile invalid. Disables
   // all further state checking.
diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc
index 298b2d7..3a9cf59 100644
--- a/libartbase/base/unix_file/fd_file_test.cc
+++ b/libartbase/base/unix_file/fd_file_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "base/common_art_test.h"  // For ScratchFile
+#include "base/file_utils.h"
 #include "gtest/gtest.h"
 #include "fd_file.h"
 #include "random_access_file_test.h"
@@ -23,9 +24,9 @@
 
 class FdFileTest : public RandomAccessFileTest {
  protected:
-  virtual RandomAccessFile* MakeTestFile() {
+  RandomAccessFile* MakeTestFile() override {
     FILE* tmp = tmpfile();
-    int fd = dup(fileno(tmp));
+    int fd = art::DupCloexec(fileno(tmp));
     fclose(tmp);
     return new FdFile(fd, false);
   }
diff --git a/libartbase/base/unix_file/random_access_file_test.h b/libartbase/base/unix_file/random_access_file_test.h
index dbe6ca9..178f89d 100644
--- a/libartbase/base/unix_file/random_access_file_test.h
+++ b/libartbase/base/unix_file/random_access_file_test.h
@@ -35,11 +35,11 @@
   virtual RandomAccessFile* MakeTestFile() = 0;
 
   virtual void SetUp() {
-    art::CommonArtTest::SetUpAndroidData(android_data_);
+    art::CommonArtTest::SetUpAndroidDataDir(android_data_);
   }
 
   virtual void TearDown() {
-    art::CommonArtTest::TearDownAndroidData(android_data_, true);
+    art::CommonArtTest::TearDownAndroidDataDir(android_data_, true);
   }
 
   std::string GetTmpPath(const std::string& name) {
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 74cc5b9..b989d9e 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -19,11 +19,10 @@
 #include <inttypes.h>
 #include <pthread.h>
 #include <sys/stat.h>
-#include <sys/syscall.h>
 #include <sys/types.h>
-#include <sys/wait.h>
 #include <unistd.h>
 
+#include <fstream>
 #include <memory>
 
 #include "android-base/file.h"
@@ -46,6 +45,16 @@
 
 #if defined(__linux__)
 #include <linux/unistd.h>
+#include <sys/syscall.h>
+#endif
+
+#if defined(_WIN32)
+#include <windows.h>
+// This include needs to be here due to our coding conventions.  Unfortunately
+// it drags in the definition of the dreaded ERROR macro.
+#ifdef ERROR
+#undef ERROR
+#endif
 #endif
 
 namespace art {
@@ -60,6 +69,8 @@
   return owner;
 #elif defined(__BIONIC__)
   return gettid();
+#elif defined(_WIN32)
+  return static_cast<pid_t>(::GetCurrentThreadId());
 #else
   return syscall(__NR_gettid);
 #endif
@@ -67,12 +78,17 @@
 
 std::string GetThreadName(pid_t tid) {
   std::string result;
+#ifdef _WIN32
+  UNUSED(tid);
+  result = "<unknown>";
+#else
   // TODO: make this less Linux-specific.
   if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) {
     result.resize(result.size() - 1);  // Lose the trailing '\n'.
   } else {
     result = "<unknown>";
   }
+#endif
   return result;
 }
 
@@ -80,10 +96,10 @@
   // The byte thresholds at which we display amounts.  A byte count is displayed
   // in unit U when kUnitThresholds[U] <= bytes < kUnitThresholds[U+1].
   static const int64_t kUnitThresholds[] = {
-    0,              // B up to...
-    3*1024,         // KB up to...
-    2*1024*1024,    // MB up to...
-    1024*1024*1024  // GB from here.
+    0,       // B up to...
+    10*KB,   // KB up to...
+    10*MB,   // MB up to...
+    10LL*GB  // GB from here.
   };
   static const int64_t kBytesPerUnit[] = { 1, KB, MB, GB };
   static const char* const kUnitStrings[] = { "B", "KB", "MB", "GB" };
@@ -136,7 +152,7 @@
   } else {
     s = thread_name + len - 15;
   }
-#if defined(__linux__)
+#if defined(__linux__) || defined(_WIN32)
   // pthread_setname_np fails rather than truncating long strings.
   char buf[16];       // MAX_TASK_COMM_LEN=16 is hard-coded in the kernel.
   strncpy(buf, s, sizeof(buf)-1);
@@ -152,6 +168,11 @@
 
 void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) {
   *utime = *stime = *task_cpu = 0;
+#ifdef _WIN32
+  // TODO: implement this.
+  UNUSED(tid);
+  *state = 'S';
+#else
   std::string stats;
   // TODO: make this less Linux-specific.
   if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) {
@@ -166,6 +187,7 @@
   *utime = strtoull(fields[11].c_str(), nullptr, 10);
   *stime = strtoull(fields[12].c_str(), nullptr, 10);
   *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
+#endif
 }
 
 static void ParseStringAfterChar(const std::string& s,
@@ -213,42 +235,25 @@
   }
 }
 
-bool FlushInstructionPipeline() {
-  // membarrier(2) is only supported for target builds (b/111199492).
-#if defined(__BIONIC__)
-  static constexpr int kSyncCoreMask =
-      MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE |
-      MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE;
-  static bool have_probed = false;
-  static bool have_sync_core = false;
+std::string GetProcessStatus(const char* key) {
+  // Build search pattern of key and separator.
+  std::string pattern(key);
+  pattern.push_back(':');
 
-  if (UNLIKELY(!have_probed)) {
-    // Probe membarrier(2) commands supported by kernel.
-    int commands = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
-    if (commands >= 0) {
-      have_sync_core = (commands & kSyncCoreMask) == kSyncCoreMask;
-      if (have_sync_core) {
-        // Register with kernel that we'll be using the private expedited sync core command.
-        CheckedCall(syscall,
-                    "membarrier register sync core",
-                    __NR_membarrier,
-                    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
-                    0);
+  // Search for status lines starting with pattern.
+  std::ifstream fs("/proc/self/status");
+  std::string line;
+  while (std::getline(fs, line)) {
+    if (strncmp(pattern.c_str(), line.c_str(), pattern.size()) == 0) {
+      // Skip whitespace in matching line (if any).
+      size_t pos = line.find_first_not_of(" \t", pattern.size());
+      if (UNLIKELY(pos == std::string::npos)) {
+        break;
       }
+      return std::string(line, pos);
     }
-    have_probed = true;
   }
-
-  if (have_sync_core) {
-    CheckedCall(syscall,
-                "membarrier sync core",
-                __NR_membarrier,
-                MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE,
-                0);
-    return true;
-  }
-#endif  // defined(__BIONIC__)
-  return false;
+  return "<unknown>";
 }
 
 }  // namespace art
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 24adbb3..11472a8 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -24,6 +24,7 @@
 #include <string>
 
 #include <android-base/logging.h>
+#include <android-base/parseint.h>
 
 #include "casts.h"
 #include "enums.h"
@@ -33,34 +34,6 @@
 
 namespace art {
 
-template <typename T>
-bool ParseUint(const char *in, T* out) {
-  char* end;
-  unsigned long long int result = strtoull(in, &end, 0);  // NOLINT(runtime/int)
-  if (in == end || *end != '\0') {
-    return false;
-  }
-  if (std::numeric_limits<T>::max() < result) {
-    return false;
-  }
-  *out = static_cast<T>(result);
-  return true;
-}
-
-template <typename T>
-bool ParseInt(const char* in, T* out) {
-  char* end;
-  long long int result = strtoll(in, &end, 0);  // NOLINT(runtime/int)
-  if (in == end || *end != '\0') {
-    return false;
-  }
-  if (result < std::numeric_limits<T>::min() || std::numeric_limits<T>::max() < result) {
-    return false;
-  }
-  *out = static_cast<T>(result);
-  return true;
-}
-
 static inline uint32_t PointerToLowMemUInt32(const void* p) {
   uintptr_t intp = reinterpret_cast<uintptr_t>(p);
   DCHECK_LE(intp, 0xFFFFFFFFU);
@@ -130,7 +103,7 @@
   DCHECK(option.starts_with(option_prefix)) << option << " " << option_prefix;
   const char* value_string = option.substr(option_prefix.size()).data();
   int64_t parsed_integer_value = 0;
-  if (!ParseInt(value_string, &parsed_integer_value)) {
+  if (!android::base::ParseInt(value_string, &parsed_integer_value)) {
     usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
   }
   *out = dchecked_integral_cast<T>(parsed_integer_value);
@@ -189,9 +162,6 @@
   __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
 }
 
-// Flush instruction pipeline. Returns true on success, false if feature is unsupported.
-bool FlushInstructionPipeline();
-
 template <typename T>
 constexpr PointerSize ConvertToPointerSize(T any) {
   if (any == 4 || any == 8) {
@@ -202,30 +172,6 @@
   }
 }
 
-// Returns a type cast pointer if object pointed to is within the provided bounds.
-// Otherwise returns nullptr.
-template <typename T>
-inline static T BoundsCheckedCast(const void* pointer,
-                                  const void* lower,
-                                  const void* upper) {
-  const uint8_t* bound_begin = static_cast<const uint8_t*>(lower);
-  const uint8_t* bound_end = static_cast<const uint8_t*>(upper);
-  DCHECK(bound_begin <= bound_end);
-
-  T result = reinterpret_cast<T>(pointer);
-  const uint8_t* begin = static_cast<const uint8_t*>(pointer);
-  const uint8_t* end = begin + sizeof(*result);
-  if (begin < bound_begin || end > bound_end || begin > end) {
-    return nullptr;
-  }
-  return result;
-}
-
-template <typename T, size_t size>
-constexpr size_t ArrayCount(const T (&)[size]) {
-  return size;
-}
-
 // Return -1 if <, 0 if ==, 1 if >.
 template <typename T>
 inline static int32_t Compare(T lhs, T rhs) {
@@ -246,6 +192,11 @@
   }
 }
 
+// Lookup value for a given key in /proc/self/status. Keys and values are separated by a ':' in
+// the status file. Returns value found on success and "<unknown>" if the key is not found or
+// there is an I/O error.
+std::string GetProcessStatus(const char* key);
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libartbase/base/utils_test.cc b/libartbase/base/utils_test.cc
index 892d1fd..631a225 100644
--- a/libartbase/base/utils_test.cc
+++ b/libartbase/base/utils_test.cc
@@ -23,8 +23,8 @@
 class UtilsTest : public testing::Test {};
 
 TEST_F(UtilsTest, PrettySize) {
-  EXPECT_EQ("1GB", PrettySize(1 * GB));
-  EXPECT_EQ("2GB", PrettySize(2 * GB));
+  EXPECT_EQ("1024MB", PrettySize(1 * GB));
+  EXPECT_EQ("2048MB", PrettySize(2 * GB));
   if (sizeof(size_t) > sizeof(uint32_t)) {
     EXPECT_EQ("100GB", PrettySize(100 * GB));
   }
@@ -107,23 +107,12 @@
   EXPECT_EQ(expected, actual);
 }
 
-TEST_F(UtilsTest, ArrayCount) {
-  int i[64];
-  EXPECT_EQ(ArrayCount(i), 64u);
-  char c[7];
-  EXPECT_EQ(ArrayCount(c), 7u);
-}
-
-TEST_F(UtilsTest, BoundsCheckedCast) {
-  char buffer[64];
-  const char* buffer_end = buffer + ArrayCount(buffer);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(nullptr, buffer, buffer_end), nullptr);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer, buffer, buffer_end),
-            reinterpret_cast<const uint64_t*>(buffer));
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer + 56, buffer, buffer_end),
-            reinterpret_cast<const uint64_t*>(buffer + 56));
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer - 1, buffer, buffer_end), nullptr);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer + 57, buffer, buffer_end), nullptr);
+TEST_F(UtilsTest, GetProcessStatus) {
+  EXPECT_EQ("utils_test", GetProcessStatus("Name"));
+  EXPECT_EQ("R (running)", GetProcessStatus("State"));
+  EXPECT_EQ("<unknown>", GetProcessStatus("tate"));
+  EXPECT_EQ("<unknown>", GetProcessStatus("e"));
+  EXPECT_EQ("<unknown>", GetProcessStatus("Dummy"));
 }
 
 }  // namespace art
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 174d227..5056edc 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -18,7 +18,6 @@
 
 #include <fcntl.h>
 #include <stdio.h>
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
@@ -27,6 +26,7 @@
 #include "android-base/stringprintf.h"
 #include "ziparchive/zip_archive.h"
 
+#include "base/mman.h"
 #include "bit_utils.h"
 #include "unix_file/fd_file.h"
 
@@ -75,10 +75,9 @@
   name += " extracted in memory from ";
   name += zip_filename;
   MemMap map = MemMap::MapAnonymous(name.c_str(),
-                                    /* addr */ nullptr,
                                     GetUncompressedLength(),
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
                                     error_msg);
   if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
@@ -138,7 +137,7 @@
                       MAP_PRIVATE,
                       zip_fd,
                       offset,
-                      /* low_4gb */ false,
+                      /*low_4gb=*/ false,
                       name.c_str(),
                       error_msg);
 
@@ -190,8 +189,9 @@
 
 MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
                                       const char* entry_filename,
-                                      std::string* error_msg) {
-  if (IsUncompressed() && GetFileDescriptor(handle_) >= 0) {
+                                      std::string* error_msg,
+                                      size_t alignment) {
+  if (IsUncompressed() && IsAlignedTo(alignment) && GetFileDescriptor(handle_) >= 0) {
     std::string local_error_msg;
     MemMap ret = MapDirectlyFromFile(zip_filename, &local_error_msg);
     if (ret.IsValid()) {
@@ -203,6 +203,11 @@
 }
 
 static void SetCloseOnExec(int fd) {
+#ifdef _WIN32
+  // Exec is not supported on Windows.
+  UNUSED(fd);
+  PLOG(ERROR) << "SetCloseOnExec is not supported on Windows.";
+#else
   // This dance is more portable than Linux's O_CLOEXEC open(2) flag.
   int flags = fcntl(fd, F_GETFD);
   if (flags == -1) {
@@ -214,6 +219,7 @@
     PLOG(WARNING) << "fcntl(" << fd << ", F_SETFD, " << flags << ") failed";
     return;
   }
+#endif
 }
 
 ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) {
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 8fc8b54..fc04ec1 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -30,8 +30,9 @@
 #include "unix_file/random_access_file.h"
 
 // system/core/zip_archive definitions.
+struct ZipArchive;
 struct ZipEntry;
-typedef void* ZipArchiveHandle;
+typedef ZipArchive* ZipArchiveHandle;
 
 namespace art {
 
@@ -58,7 +59,8 @@
 
   MemMap MapDirectlyOrExtract(const char* zip_filename,
                               const char* entry_filename,
-                              std::string* error_msg);
+                              std::string* error_msg,
+                              size_t alignment);
 
   uint32_t GetUncompressedLength();
   uint32_t GetCrc32();
diff --git a/libartbase/base/zip_archive_test.cc b/libartbase/base/zip_archive_test.cc
index b923881..969cf12 100644
--- a/libartbase/base/zip_archive_test.cc
+++ b/libartbase/base/zip_archive_test.cc
@@ -23,6 +23,7 @@
 #include <memory>
 
 #include "base/common_art_test.h"
+#include "file_utils.h"
 #include "os.h"
 #include "unix_file/fd_file.h"
 
@@ -41,7 +42,7 @@
 
   ScratchFile tmp;
   ASSERT_NE(-1, tmp.GetFd());
-  std::unique_ptr<File> file(new File(dup(tmp.GetFd()), tmp.GetFilename(), false));
+  std::unique_ptr<File> file(new File(DupCloexec(tmp.GetFd()), tmp.GetFilename(), false));
   ASSERT_TRUE(file.get() != nullptr);
   bool success = zip_entry->ExtractToFile(*file, &error_msg);
   ASSERT_TRUE(success) << error_msg;
@@ -49,7 +50,7 @@
   file.reset(nullptr);
 
   uint32_t computed_crc = crc32(0L, Z_NULL, 0);
-  int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
+  int fd = open(tmp.GetFilename().c_str(), O_RDONLY | O_CLOEXEC);
   ASSERT_NE(-1, fd);
   const size_t kBufSize = 32768;
   uint8_t buf[kBufSize];
diff --git a/libartbase/libartbase.map b/libartbase/libartbase.map
new file mode 100644
index 0000000..6249930
--- /dev/null
+++ b/libartbase/libartbase.map
@@ -0,0 +1,15 @@
+# This is used only to hide data symbols that get imported through
+# whole_static_libs, or else they might trigger the ASan odr-violation check.
+# Before adding symbols here, please make sure that it doesn't give rise to a
+# real ODR problem. All these symbols are either in .rodata or .data.rel.ro
+# sections.
+LIBARTBASE {
+  local:
+    PPMD7_kExpEscape;
+    XZ_SIG;
+    g_AlignedAlloc;
+    g_Alloc;
+    g_BigAlloc;
+    g_MidAlloc;
+    k7zSignature;
+};
diff --git a/libartpalette/Android.bp b/libartpalette/Android.bp
new file mode 100644
index 0000000..778109d
--- /dev/null
+++ b/libartpalette/Android.bp
@@ -0,0 +1,116 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+  name: "libartpalette_defaults",
+  defaults: ["art_defaults"],
+  host_supported: true,
+  export_include_dirs: ["include"],
+}
+
+// libartpalette-system is the implementation of the abstraction layer. It is
+// only available as a shared library on Android.
+art_cc_library {
+    name: "libartpalette-system",
+    defaults: ["libartpalette_defaults"],
+
+    target: {
+        android: {
+          srcs: ["system/palette_android.cc",],
+          header_libs: ["libbase_headers"],
+          shared_libs: [
+            "libcutils",
+            "liblog",
+            "libprocessgroup",
+          ],
+        },
+        host: {
+          header_libs: ["libbase_headers"],
+          srcs: ["system/palette_fake.cc",],
+        },
+        darwin: {
+            enabled: false,
+        },
+        windows: {
+            enabled: false,
+        },
+    },
+    static: {
+        enabled: false,
+    },
+    version_script: "libartpalette.map.txt",
+}
+
+// libartpalette is the dynamic loader of the platform abstraction
+// layer. It is only used on Android. For other targets, it just
+// implements a fake platform implementation.
+art_cc_library {
+    name: "libartpalette",
+    defaults: ["libartpalette_defaults"],
+    required: ["libartpalette-system"],  // libartpalette.so dlopen()'s libartpalette-system.
+    header_libs: ["libbase_headers"],
+    target: {
+        // Targets supporting dlopen build the client library which loads
+        // and binds the methods in the libartpalette-system library.
+        android: {
+            srcs: ["apex/palette.cc"],
+            shared: {
+                shared_libs: ["liblog"],
+            },
+            static: {
+                static_libs: ["liblog"],
+            },
+            version_script: "libartpalette.map.txt",
+        },
+        linux_bionic: {
+          header_libs: ["libbase_headers"],
+            srcs: ["system/palette_fake.cc"],
+            shared: {
+                shared_libs: ["liblog"],
+            },
+            version_script: "libartpalette.map.txt",
+        },
+        linux_glibc: {
+          header_libs: ["libbase_headers"],
+            srcs: ["system/palette_fake.cc"],
+            shared: {
+                shared_libs: ["liblog"],
+            },
+            version_script: "libartpalette.map.txt",
+        },
+        // Targets without support for dlopen just use the sources for
+        // the system library which actually implements functionality.
+        darwin: {
+            enabled: true,
+            header_libs: ["libbase_headers"],
+            srcs: ["system/palette_fake.cc"],
+        },
+        windows: {
+            enabled: true,
+            header_libs: ["libbase_headers"],
+            srcs: ["system/palette_fake.cc"],
+        },
+    }
+}
+
+art_cc_test {
+    name: "art_libartpalette_tests",
+    defaults: ["art_gtest_defaults"],
+    host_supported: true,
+    srcs: ["apex/palette_test.cc"],
+    shared_libs: ["libartpalette"],
+    test_per_src: true,
+}
diff --git a/libartpalette/apex/palette.cc b/libartpalette/apex/palette.cc
new file mode 100644
index 0000000..0b391f8
--- /dev/null
+++ b/libartpalette/apex/palette.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "palette/palette.h"
+
+#include <dlfcn.h>
+#include <stdlib.h>
+
+#include <android/log.h>
+#include <android-base/macros.h>
+
+namespace {
+
+// Logging tag.
+static constexpr const char* kLogTag = "libartpalette";
+
+// Name of the palette library present in the /system partition.
+static constexpr const char* kPaletteSystemLibrary = "libartpalette-system.so";
+
+// Generic method used when a dynamically loaded palette instance does not
+// support a method.
+enum PaletteStatus PaletteMethodNotSupported() {
+  return PaletteStatus::kNotSupported;
+}
+
+// Declare type aliases for pointers to each function in the interface.
+#define PALETTE_METHOD_TYPE_ALIAS(Name, ...) \
+  using Name ## Method = PaletteStatus(*)(__VA_ARGS__);
+PALETTE_METHOD_LIST(PALETTE_METHOD_TYPE_ALIAS)
+#undef PALETTE_METHOD_TYPE_ALIAS
+
+// Singleton class responsible for dynamically loading the palette library and
+// binding functions there to method pointers.
+class PaletteLoader {
+ public:
+  static PaletteLoader& Instance() {
+    static PaletteLoader instance;
+    return instance;
+  }
+
+  // Accessor methods to get instances of palette methods.
+#define PALETTE_LOADER_METHOD_ACCESSOR(Name, ...)                       \
+  Name ## Method Get ## Name ## Method() const { return Name ## Method ## _; }
+PALETTE_METHOD_LIST(PALETTE_LOADER_METHOD_ACCESSOR)
+#undef PALETTE_LOADER_METHOD_ACCESSOR
+
+ private:
+  PaletteLoader();
+
+  static void* OpenLibrary();
+  static void* GetMethod(void* palette_lib, const char* name);
+
+  // Handle to the palette library from dlopen().
+  void* palette_lib_;
+
+  // Fields to store pointers to palette methods.
+#define PALETTE_LOADER_METHOD_FIELD(Name, ...) \
+  const Name ## Method Name ## Method ## _;
+  PALETTE_METHOD_LIST(PALETTE_LOADER_METHOD_FIELD)
+#undef PALETTE_LOADER_METHOD_FIELD
+
+  DISALLOW_COPY_AND_ASSIGN(PaletteLoader);
+};
+
+void* PaletteLoader::OpenLibrary() {
+  void* handle = dlopen(kPaletteSystemLibrary, RTLD_NOW | RTLD_GLOBAL | RTLD_NODELETE);
+  if (handle == nullptr) {
+    // dlerror message includes details of error and file being opened.
+    __android_log_assert(nullptr, kLogTag, "%s", dlerror());
+  }
+  return handle;
+}
+
+void* PaletteLoader::GetMethod(void* palette_lib, const char* name) {
+  void* method = nullptr;
+  if (palette_lib != nullptr) {
+    method = dlsym(palette_lib, name);
+  }
+  if (method == nullptr) {
+    return reinterpret_cast<void*>(PaletteMethodNotSupported);
+  }
+  // TODO(oth): consider new GetMethodSignature() in the Palette API which
+  // would allow sanity checking the type signatures.
+  return method;
+}
+
+PaletteLoader::PaletteLoader() :
+    palette_lib_(OpenLibrary())
+#define PALETTE_LOADER_BIND_METHOD(Name, ...)                           \
+    , Name ## Method ## _(reinterpret_cast<Name ## Method>(GetMethod(palette_lib_, #Name)))
+    PALETTE_METHOD_LIST(PALETTE_LOADER_BIND_METHOD)
+#undef PALETTE_LOADER_BIND_METHOD
+{
+}
+
+}  // namespace
+
+extern "C" {
+
+enum PaletteStatus PaletteGetVersion(/*out*/int32_t* version) {
+  PaletteGetVersionMethod m = PaletteLoader::Instance().GetPaletteGetVersionMethod();
+  return m(version);
+}
+
+enum PaletteStatus PaletteSchedSetPriority(int32_t tid, int32_t java_priority) {
+  PaletteSchedSetPriorityMethod m = PaletteLoader::Instance().GetPaletteSchedSetPriorityMethod();
+  return m(tid, java_priority);
+}
+
+enum PaletteStatus PaletteSchedGetPriority(int32_t tid, /*out*/int32_t* java_priority) {
+  PaletteSchedGetPriorityMethod m = PaletteLoader::Instance().GetPaletteSchedGetPriorityMethod();
+  return m(tid, java_priority);
+}
+
+enum PaletteStatus PaletteTraceEnabled(/*out*/int32_t* enabled) {
+  PaletteTraceEnabledMethod m = PaletteLoader::Instance().GetPaletteTraceEnabledMethod();
+  return m(enabled);
+}
+
+enum PaletteStatus PaletteTraceBegin(/*in*/const char* name) {
+  PaletteTraceBeginMethod m = PaletteLoader::Instance().GetPaletteTraceBeginMethod();
+  return m(name);
+}
+
+enum PaletteStatus PaletteTraceEnd() {
+  PaletteTraceEndMethod m = PaletteLoader::Instance().GetPaletteTraceEndMethod();
+  return m();
+}
+
+enum PaletteStatus PaletteTraceIntegerValue(/*in*/const char* name, int32_t value) {
+  PaletteTraceIntegerValueMethod m = PaletteLoader::Instance().GetPaletteTraceIntegerValueMethod();
+  return m(name, value);
+}
+
+}  // extern "C"
diff --git a/libartpalette/apex/palette_test.cc b/libartpalette/apex/palette_test.cc
new file mode 100644
index 0000000..8bbe0ee
--- /dev/null
+++ b/libartpalette/apex/palette_test.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "palette/palette.h"
+
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+pid_t GetTid() {
+#ifdef __BIONIC__
+  return gettid();
+#else  // __BIONIC__
+  return syscall(__NR_gettid);
+#endif  // __BIONIC__
+}
+
+}  // namespace
+
+class PaletteClientTest : public testing::Test {};
+
+TEST_F(PaletteClientTest, GetVersion) {
+  int32_t version = -1;
+  PaletteStatus status = PaletteGetVersion(&version);
+  ASSERT_EQ(PaletteStatus::kOkay, status);
+  ASSERT_GE(version, 1);
+}
+
+TEST_F(PaletteClientTest, SchedPriority) {
+  int32_t tid = GetTid();
+  int32_t saved_priority;
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteSchedGetPriority(tid, &saved_priority));
+
+  EXPECT_EQ(PaletteStatus::kInvalidArgument, PaletteSchedSetPriority(tid, /*java_priority=*/ 0));
+  EXPECT_EQ(PaletteStatus::kInvalidArgument, PaletteSchedSetPriority(tid, /*java_priority=*/ -1));
+  EXPECT_EQ(PaletteStatus::kInvalidArgument, PaletteSchedSetPriority(tid, /*java_priority=*/ 11));
+
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteSchedSetPriority(tid, /*java_priority=*/ 1));
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteSchedSetPriority(tid, saved_priority));
+}
+
+TEST_F(PaletteClientTest, Trace) {
+  int32_t enabled;
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteTraceEnabled(&enabled));
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteTraceBegin("Hello world!"));
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteTraceEnd());
+  EXPECT_EQ(PaletteStatus::kOkay, PaletteTraceIntegerValue("Beans", /*value=*/ 3));
+}
diff --git a/libartpalette/include/palette/palette.h b/libartpalette/include/palette/palette.h
new file mode 100644
index 0000000..1f58403
--- /dev/null
+++ b/libartpalette/include/palette/palette.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_H_
+#define ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_H_
+
+#include "palette_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+// Palette method signatures are defined in palette_method_list.h.
+
+#define PALETTE_METHOD_DECLARATION(Name, ...) \
+  enum PaletteStatus Name(__VA_ARGS__);
+#include "palette_method_list.h"
+PALETTE_METHOD_LIST(PALETTE_METHOD_DECLARATION)
+#undef PALETTE_METHOD_DECLARATION
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+
+#endif  // ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_H_
diff --git a/libartpalette/include/palette/palette_method_list.h b/libartpalette/include/palette/palette_method_list.h
new file mode 100644
index 0000000..dc4ec52
--- /dev/null
+++ b/libartpalette/include/palette/palette_method_list.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_METHOD_LIST_H_
+#define ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_METHOD_LIST_H_
+
+#include <stdint.h>
+
+// Methods in version 1 API
+#define PALETTE_METHOD_LIST(M)                                              \
+  M(PaletteGetVersion, /*out*/int32_t* version)                             \
+  M(PaletteSchedSetPriority, int32_t tid, int32_t java_priority)            \
+  M(PaletteSchedGetPriority, int32_t tid, /*out*/int32_t* java_priority)    \
+  M(PaletteTraceEnabled, /*out*/int32_t* enabled)                           \
+  M(PaletteTraceBegin, const char* name)                                    \
+  M(PaletteTraceEnd)                                                        \
+  M(PaletteTraceIntegerValue, const char* name, int32_t value)
+
+#endif  // ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_METHOD_LIST_H_
diff --git a/libartpalette/include/palette/palette_types.h b/libartpalette/include/palette/palette_types.h
new file mode 100644
index 0000000..837086e
--- /dev/null
+++ b/libartpalette/include/palette/palette_types.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_TYPES_H_
+#define ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_TYPES_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+// Return values for palette functions.
+enum PaletteStatus {
+  kOkay = 0,
+  kCheckErrno = 1,
+  kInvalidArgument = 2,
+  kNotSupported = 3,
+};
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+
+#endif  // ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_TYPES_H_
diff --git a/libartpalette/libartpalette.map.txt b/libartpalette/libartpalette.map.txt
new file mode 100644
index 0000000..0920835
--- /dev/null
+++ b/libartpalette/libartpalette.map.txt
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LIBARTPALETTE_1 {
+  global:
+    # --- VERSION 01 API ---
+    PaletteGetVersion;
+    PaletteSchedSetPriority;
+    PaletteSchedGetPriority;
+    PaletteTraceEnabled;
+    PaletteTraceBegin;
+    PaletteTraceEnd;
+    PaletteTraceIntegerValue;
+
+  local:
+    *;
+};
diff --git a/libartpalette/system/palette_android.cc b/libartpalette/system/palette_android.cc
new file mode 100644
index 0000000..aed3862
--- /dev/null
+++ b/libartpalette/system/palette_android.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+
+#include "palette/palette.h"
+
+#include <errno.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <mutex>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <cutils/sched_policy.h>
+#include <cutils/trace.h>
+#include <log/event_tag_map.h>
+#include <utils/Thread.h>
+
+#include "palette_system.h"
+
+enum PaletteStatus PaletteGetVersion(int32_t* version) {
+  *version = art::palette::kPaletteVersion;
+  return PaletteStatus::kOkay;
+}
+
+// Conversion map for "nice" values.
+//
+// We use Android thread priority constants to be consistent with the rest
+// of the system.  In some cases adjacent entries may overlap.
+//
+static const int kNiceValues[art::palette::kNumManagedThreadPriorities] = {
+  ANDROID_PRIORITY_LOWEST,                // 1 (MIN_PRIORITY)
+  ANDROID_PRIORITY_BACKGROUND + 6,
+  ANDROID_PRIORITY_BACKGROUND + 3,
+  ANDROID_PRIORITY_BACKGROUND,
+  ANDROID_PRIORITY_NORMAL,                // 5 (NORM_PRIORITY)
+  ANDROID_PRIORITY_NORMAL - 2,
+  ANDROID_PRIORITY_NORMAL - 4,
+  ANDROID_PRIORITY_URGENT_DISPLAY + 3,
+  ANDROID_PRIORITY_URGENT_DISPLAY + 2,
+  ANDROID_PRIORITY_URGENT_DISPLAY         // 10 (MAX_PRIORITY)
+};
+
+enum PaletteStatus PaletteSchedSetPriority(int32_t tid, int32_t managed_priority) {
+  if (managed_priority < art::palette::kMinManagedThreadPriority ||
+      managed_priority > art::palette::kMaxManagedThreadPriority) {
+    return PaletteStatus::kInvalidArgument;
+  }
+  int new_nice = kNiceValues[managed_priority - art::palette::kMinManagedThreadPriority];
+
+  // TODO: b/18249098 The code below is broken. It uses getpriority() as a proxy for whether a
+  // thread is already in the SP_FOREGROUND cgroup. This is not necessarily true for background
+  // processes, where all threads are in the SP_BACKGROUND cgroup. This means that callers will
+  // have to call setPriority twice to do what they want :
+  //
+  //     Thread.setPriority(Thread.MIN_PRIORITY);  // no-op wrt to cgroups
+  //     Thread.setPriority(Thread.MAX_PRIORITY);  // will actually change cgroups.
+  if (new_nice >= ANDROID_PRIORITY_BACKGROUND) {
+    set_sched_policy(tid, SP_BACKGROUND);
+  } else if (getpriority(PRIO_PROCESS, tid) >= ANDROID_PRIORITY_BACKGROUND) {
+    set_sched_policy(tid, SP_FOREGROUND);
+  }
+
+  if (setpriority(PRIO_PROCESS, tid, new_nice) != 0) {
+    return PaletteStatus::kCheckErrno;
+  }
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteSchedGetPriority(int32_t tid, /*out*/int32_t* managed_priority) {
+  errno = 0;
+  int native_priority = getpriority(PRIO_PROCESS, tid);
+  if (native_priority == -1 && errno != 0) {
+    *managed_priority = art::palette::kNormalManagedThreadPriority;
+    return PaletteStatus::kCheckErrno;
+  }
+
+  for (int p = art::palette::kMinManagedThreadPriority;
+       p <= art::palette::kMaxManagedThreadPriority;
+       p = p + 1) {
+    int index = p - art::palette::kMinManagedThreadPriority;
+    if (native_priority >= kNiceValues[index]) {
+      *managed_priority = p;
+      return PaletteStatus::kOkay;
+    }
+  }
+  *managed_priority = art::palette::kMaxManagedThreadPriority;
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceEnabled(/*out*/int32_t* enabled) {
+  *enabled = (ATRACE_ENABLED() != 0) ? 1 : 0;
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceBegin(const char* name) {
+  ATRACE_BEGIN(name);
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceEnd() {
+  ATRACE_END();
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceIntegerValue(const char* name, int32_t value) {
+  ATRACE_INT(name, value);
+  return PaletteStatus::kOkay;
+}
diff --git a/libartpalette/system/palette_fake.cc b/libartpalette/system/palette_fake.cc
new file mode 100644
index 0000000..0961e77
--- /dev/null
+++ b/libartpalette/system/palette_fake.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "palette/palette.h"
+
+#include <map>
+#include <mutex>
+
+#include <android-base/macros.h>  // For ATTRIBUTE_UNUSED
+
+#include "palette_system.h"
+
+enum PaletteStatus PaletteGetVersion(int32_t* version) {
+  *version = art::palette::kPaletteVersion;
+  return PaletteStatus::kOkay;
+}
+
+// Cached thread priority for testing. No thread priorities are ever affected.
+static std::mutex g_tid_priority_map_mutex;
+static std::map<int32_t, int32_t> g_tid_priority_map;
+
+enum PaletteStatus PaletteSchedSetPriority(int32_t tid, int32_t priority) {
+  if (priority < art::palette::kMinManagedThreadPriority ||
+      priority > art::palette::kMaxManagedThreadPriority) {
+    return PaletteStatus::kInvalidArgument;
+  }
+  std::lock_guard guard(g_tid_priority_map_mutex);
+  g_tid_priority_map[tid] = priority;
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteSchedGetPriority(int32_t tid,
+                                           /*out*/int32_t* priority) {
+  std::lock_guard guard(g_tid_priority_map_mutex);
+  if (g_tid_priority_map.find(tid) == g_tid_priority_map.end()) {
+    g_tid_priority_map[tid] = art::palette::kNormalManagedThreadPriority;
+  }
+  *priority = g_tid_priority_map[tid];
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceEnabled(/*out*/int32_t* enabled) {
+  *enabled = 0;
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceBegin(const char* name ATTRIBUTE_UNUSED) {
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceEnd() {
+  return PaletteStatus::kOkay;
+}
+
+enum PaletteStatus PaletteTraceIntegerValue(const char* name ATTRIBUTE_UNUSED,
+                                            int32_t value ATTRIBUTE_UNUSED) {
+  return PaletteStatus::kOkay;
+}
diff --git a/libartpalette/system/palette_system.h b/libartpalette/system/palette_system.h
new file mode 100644
index 0000000..b28e00d
--- /dev/null
+++ b/libartpalette/system/palette_system.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTPALETTE_SYSTEM_PALETTE_SYSTEM_H_
+#define ART_LIBARTPALETTE_SYSTEM_PALETTE_SYSTEM_H_
+
+#include <stdint.h>
+
+namespace art {
+namespace palette {
+
+static constexpr int32_t kPaletteVersion = 1;
+
+// Managed thread definitions
+static constexpr int32_t kNormalManagedThreadPriority = 5;
+static constexpr int32_t kMinManagedThreadPriority = 1;
+static constexpr int32_t kMaxManagedThreadPriority = 10;
+static constexpr int32_t kNumManagedThreadPriorities =
+    kMaxManagedThreadPriority - kMinManagedThreadPriority + 1;
+
+}  // namespace palette
+}  // namespace art
+
+#endif  // ART_LIBARTPALETTE_SYSTEM_PALETTE_SYSTEM_H_
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 06fd19e..f83f18c 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -32,6 +32,7 @@
         "dex/dex_instruction.cc",
         "dex/modifiers.cc",
         "dex/primitive.cc",
+        "dex/signature.cc",
         "dex/standard_dex_file.cc",
         "dex/type_lookup_table.cc",
         "dex/utf.cc",
@@ -44,34 +45,84 @@
                 "libz",
             ],
             shared_libs: [
-                "libutils",
+                 // For MemMap.
+                 "libartbase",
+                 "libartpalette",
+                 "liblog",
+                 // For common macros.
+                 "libbase",
+            ],
+            export_shared_lib_headers: [
+                "libartbase",
+                "libbase",
             ],
         },
-        host: {
+        not_windows: {
             shared_libs: [
                 "libziparchive",
                 "libz",
+                 // For MemMap.
+                 "libartbase",
+                 "libartpalette",
+                 "liblog",
+                 // For common macros.
+                 "libbase",
             ],
+            export_shared_lib_headers: [
+                "libartbase",
+                "libbase",
+            ],
+        },
+        windows: {
+            static_libs: [
+                "libziparchive",
+                "libz",
+                 // For MemMap.
+                 "libartbase",
+                 "libartpalette",
+                 "liblog",
+                 // For common macros.
+                 "libbase",
+            ],
+            export_static_lib_headers: [
+                "libartbase",
+                "libbase",
+            ],
+            cflags: ["-Wno-thread-safety"],
         },
     },
     generated_sources: ["dexfile_operator_srcs"],
-    shared_libs: [
-        // For MemMap.
-        "libartbase",
-        "liblog",
-        // For atrace.
-        "libcutils",
-        // For common macros.
-        "libbase",
-        "libz",
-    ],
     export_include_dirs: ["."],
-    export_shared_lib_headers: [
-        "libartbase",
+}
+
+cc_defaults {
+    name: "libdexfile_static_base_defaults",
+    static_libs: [
         "libbase",
+        "liblog",
+        "libz",
+        "libziparchive",
     ],
 }
 
+cc_defaults {
+    name: "libdexfile_static_defaults",
+    defaults: [
+        "libartbase_static_defaults",
+        "libdexfile_static_base_defaults",
+    ],
+    static_libs: ["libdexfile"],
+}
+
+cc_defaults {
+    name: "libdexfiled_static_defaults",
+    defaults: [
+        "libartbased_static_defaults",
+        "libdexfile_static_base_defaults",
+    ],
+    static_libs: ["libdexfiled"],
+}
+
 gensrcs {
     name: "dexfile_operator_srcs",
     cmd: "$(location generate_operator_out) art/libdexfile $(in) > $(out)",
@@ -95,6 +146,14 @@
     strip: {
         keep_symbols: true,
     },
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -103,6 +162,14 @@
         "art_debug_defaults",
         "libdexfile_defaults",
     ],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_test {
@@ -134,3 +201,93 @@
         "external/zlib",
     ],
 }
+
+cc_library_headers {
+    name: "libdexfile_external_headers",
+    host_supported: true,
+    header_libs: ["libbase_headers"],
+    export_header_lib_headers: ["libbase_headers"],
+    export_include_dirs: ["external/include"],
+
+    target: {
+        windows: {
+            enabled: true,
+        },
+    },
+}
+
+cc_library {
+    name: "libdexfile_external",
+    host_supported: true,
+    srcs: [
+        "external/dex_file_ext.cc",
+    ],
+    header_libs: ["libdexfile_external_headers"],
+    shared_libs: [
+        "libbase",
+        "libdexfile",
+    ],
+
+    // TODO(b/120670568): Enable this when linking bug is fixed.
+    // stubs: {
+    //     symbol_file: "external/libdexfile_external.map.txt",
+    //     versions: ["1"],
+    // },
+
+    // Hide symbols using version scripts for targets that support it, i.e. all
+    // but Darwin.
+    // TODO(b/120670568): Clean this up when stubs above is enabled.
+    target: {
+        android: {
+            version_script: "external/libdexfile_external.map.txt",
+        },
+        linux_bionic: {
+            version_script: "external/libdexfile_external.map.txt",
+        },
+        linux_glibc: {
+            version_script: "external/libdexfile_external.map.txt",
+        },
+        windows: {
+            version_script: "external/libdexfile_external.map.txt",
+        },
+    },
+}
+
+art_cc_test {
+    name: "art_libdexfile_external_tests",
+    host_supported: true,
+    test_per_src: true,  // For consistency with other ART gtests.
+    srcs: [
+        "external/dex_file_ext_c_test.c",
+    ],
+    header_libs: ["libdexfile_external_headers"],
+}
+
+// Support library with a C++ API for accessing the libdexfile API for external
+// (non-ART) users. They should link to their own instance of this (either
+// statically or through linker namespaces).
+cc_library {
+    name: "libdexfile_support",
+    host_supported: true,
+    srcs: [
+        "external/dex_file_supp.cc",
+    ],
+    header_libs: ["libdexfile_external_headers"],
+    shared_libs: ["libdexfile_external"],
+    export_header_lib_headers: ["libdexfile_external_headers"],
+}
+
+art_cc_test {
+    name: "art_libdexfile_support_tests",
+    host_supported: true,
+    test_per_src: true,  // For consistency with other ART gtests.
+    srcs: [
+        "external/dex_file_supp_test.cc",
+    ],
+    shared_libs: [
+        "libartbase",
+        "libbase",
+        "libdexfile_external",
+        "libdexfile_support",
+    ],
+}
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 4f73967..7e93639 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -16,7 +16,6 @@
 
 #include "art_dex_file_loader.h"
 
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 #include <sys/stat.h>
 
 #include "android-base/stringprintf.h"
@@ -24,6 +23,7 @@
 #include "base/file_magic.h"
 #include "base/file_utils.h"
 #include "base/mem_map.h"
+#include "base/mman.h"  // For the PROT_* and MAP_* constants.
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
@@ -95,7 +95,7 @@
   File fd;
   if (zip_fd != -1) {
      if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
-       fd = File(DupCloexec(zip_fd), false /* check_usage */);
+       fd = File(DupCloexec(zip_fd), /* check_usage= */ false);
      }
   } else {
     fd = OpenAndReadMagic(filename, &magic, error_msg);
@@ -142,9 +142,9 @@
   if (IsMagicValid(magic)) {
     std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
                                                      filename,
-                                                     /* verify */ false,
-                                                     /* verify_checksum */ false,
-                                                     /* mmap_shared */ false,
+                                                     /* verify= */ false,
+                                                     /* verify_checksum= */ false,
+                                                     /* mmap_shared= */ false,
                                                      error_msg));
     if (dex_file == nullptr) {
       return false;
@@ -156,27 +156,29 @@
   return false;
 }
 
-std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
-                                                      size_t size,
-                                                      const std::string& location,
-                                                      uint32_t location_checksum,
-                                                      const OatDexFile* oat_dex_file,
-                                                      bool verify,
-                                                      bool verify_checksum,
-                                                      std::string* error_msg) const {
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(
+    const uint8_t* base,
+    size_t size,
+    const std::string& location,
+    uint32_t location_checksum,
+    const OatDexFile* oat_dex_file,
+    bool verify,
+    bool verify_checksum,
+    std::string* error_msg,
+    std::unique_ptr<DexFileContainer> container) const {
   ScopedTrace trace(std::string("Open dex file from RAM ") + location);
   return OpenCommon(base,
                     size,
-                    /*data_base*/ nullptr,
-                    /*data_size*/ 0u,
+                    /*data_base=*/ nullptr,
+                    /*data_size=*/ 0u,
                     location,
                     location_checksum,
                     oat_dex_file,
                     verify,
                     verify_checksum,
                     error_msg,
-                    /*container*/ nullptr,
-                    /*verify_result*/ nullptr);
+                    std::move(container),
+                    /*verify_result=*/ nullptr);
 }
 
 std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
@@ -199,8 +201,8 @@
   uint8_t* begin = map.Begin();
   std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
                                                  size,
-                                                 /*data_base*/ nullptr,
-                                                 /*data_size*/ 0u,
+                                                 /*data_base=*/ nullptr,
+                                                 /*data_size=*/ 0u,
                                                  location,
                                                  location_checksum,
                                                  kNoOatDexFile,
@@ -208,7 +210,7 @@
                                                  verify_checksum,
                                                  error_msg,
                                                  std::make_unique<MemMapContainer>(std::move(map)),
-                                                 /*verify_result*/ nullptr);
+                                                 /*verify_result=*/ nullptr);
   // Opening CompactDex is only supported from vdex files.
   if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
     *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
@@ -240,7 +242,7 @@
                                                      location,
                                                      verify,
                                                      verify_checksum,
-                                                     /* mmap_shared */ false,
+                                                     /* mmap_shared= */ false,
                                                      error_msg));
     if (dex_file.get() != nullptr) {
       dex_files->push_back(std::move(dex_file));
@@ -290,7 +292,7 @@
   CHECK(!location.empty());
   MemMap map;
   {
-    File delayed_close(fd, /* check_usage */ false);
+    File delayed_close(fd, /* check_usage= */ false);
     struct stat sbuf;
     memset(&sbuf, 0, sizeof(sbuf));
     if (fstat(fd, &sbuf) == -1) {
@@ -308,7 +310,7 @@
                           mmap_shared ? MAP_SHARED : MAP_PRIVATE,
                           fd,
                           0,
-                          /*low_4gb*/false,
+                          /*low_4gb=*/false,
                           location.c_str(),
                           error_msg);
     if (!map.IsValid()) {
@@ -330,8 +332,8 @@
 
   std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
                                                  size,
-                                                 /*data_base*/ nullptr,
-                                                 /*data_size*/ 0u,
+                                                 /*data_base=*/ nullptr,
+                                                 /*data_size=*/ 0u,
                                                  location,
                                                  dex_header->checksum_,
                                                  kNoOatDexFile,
@@ -339,7 +341,7 @@
                                                  verify_checksum,
                                                  error_msg,
                                                  std::make_unique<MemMapContainer>(std::move(map)),
-                                                 /*verify_result*/ nullptr);
+                                                 /*verify_result=*/ nullptr);
 
   // Opening CompactDex is only supported from vdex files.
   if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
@@ -407,8 +409,8 @@
   size_t size = map.Size();
   std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
                                                  size,
-                                                 /*data_base*/ nullptr,
-                                                 /*data_size*/ 0u,
+                                                 /*data_base=*/ nullptr,
+                                                 /*data_size=*/ 0u,
                                                  location,
                                                  zip_entry->GetCrc32(),
                                                  kNoOatDexFile,
@@ -537,17 +539,17 @@
                                                                 error_msg,
                                                                 std::move(container),
                                                                 verify_result);
-
-  // Check if this dex file is located in the framework directory.
-  // If it is, set a flag on the dex file. This is used by hidden API
-  // policy decision logic.
-  // Location can contain multidex suffix, so fetch its canonical version. Note
-  // that this will call `realpath`.
-  std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
-  if (dex_file != nullptr && LocationIsOnSystemFramework(path.c_str())) {
-    dex_file->SetIsPlatformDexFile();
+  if (dex_file != nullptr) {
+    // Set hidden API domain based based on location.
+    // Location can contain multidex suffix, so fetch its canonical version. Note
+    // that this will call `realpath`.
+    std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
+    if (LocationIsOnSystemFramework(path.c_str())) {
+      dex_file->SetHiddenapiDomain(hiddenapi::Domain::kPlatform);
+    } else if (LocationIsOnRuntimeModule(path.c_str())) {
+      dex_file->SetHiddenapiDomain(hiddenapi::Domain::kCorePlatform);
+    }
   }
-
   return dex_file;
 }
 
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 40d4673..d41eac5 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -54,14 +54,16 @@
                             bool* only_contains_uncompressed_dex = nullptr) const override;
 
   // Opens .dex file, backed by existing memory
-  std::unique_ptr<const DexFile> Open(const uint8_t* base,
-                                      size_t size,
-                                      const std::string& location,
-                                      uint32_t location_checksum,
-                                      const OatDexFile* oat_dex_file,
-                                      bool verify,
-                                      bool verify_checksum,
-                                      std::string* error_msg) const override;
+  std::unique_ptr<const DexFile> Open(
+      const uint8_t* base,
+      size_t size,
+      const std::string& location,
+      uint32_t location_checksum,
+      const OatDexFile* oat_dex_file,
+      bool verify,
+      bool verify_checksum,
+      std::string* error_msg,
+      std::unique_ptr<DexFileContainer> container = nullptr) const override;
 
   // Opens .dex file that has been memory-mapped by the caller.
   std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index a7d0363..8c9258b 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -107,13 +107,13 @@
   ASSERT_TRUE(raw.get() != nullptr);
   EXPECT_EQ(3U, raw->NumClassDefs());
 
-  const DexFile::ClassDef& c0 = raw->GetClassDef(0);
+  const dex::ClassDef& c0 = raw->GetClassDef(0);
   EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0));
 
-  const DexFile::ClassDef& c1 = raw->GetClassDef(1);
+  const dex::ClassDef& c1 = raw->GetClassDef(1);
   EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1));
 
-  const DexFile::ClassDef& c2 = raw->GetClassDef(2);
+  const dex::ClassDef& c2 = raw->GetClassDef(2);
   EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2));
 }
 
@@ -122,7 +122,7 @@
   ASSERT_TRUE(raw.get() != nullptr);
   EXPECT_EQ(1U, raw->NumClassDefs());
 
-  const DexFile::ClassDef& class_def = raw->GetClassDef(0);
+  const dex::ClassDef& class_def = raw->GetClassDef(0);
   ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
 
   ClassAccessor accessor(*raw, class_def);
@@ -133,7 +133,7 @@
   // Check the signature for the static initializer.
   {
     ASSERT_EQ(1U, accessor.NumDirectMethods());
-    const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
+    const dex::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
     const char* name = raw->StringDataByIdx(method_id.name_idx_);
     ASSERT_STREQ("<init>", name);
     std::string signature(raw->GetMethodSignature(method_id).ToString());
@@ -207,7 +207,7 @@
   for (const Result& r : results) {
     ++cur_method;
     ASSERT_TRUE(cur_method != methods.end());
-    const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
+    const dex::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
 
     const char* name = raw->StringDataByIdx(method_id.name_idx_);
     ASSERT_STREQ(r.name, name);
@@ -217,9 +217,9 @@
 
     std::string plain_method = std::string("GetMethodSignature.") + r.name;
     ASSERT_EQ(plain_method,
-              raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ false));
+              raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ false));
     ASSERT_EQ(r.pretty_method,
-              raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ true));
+              raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ true));
   }
 }
 
@@ -232,7 +232,7 @@
       "D", "I", "J", nullptr };
   for (size_t i = 0; strings[i] != nullptr; i++) {
     const char* str = strings[i];
-    const DexFile::StringId* str_id = raw->FindStringId(str);
+    const dex::StringId* str_id = raw->FindStringId(str);
     const char* dex_str = raw->GetStringData(*str_id);
     EXPECT_STREQ(dex_str, str);
   }
@@ -241,10 +241,10 @@
 TEST_F(ArtDexFileLoaderTest, FindTypeId) {
   for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
     const char* type_str = java_lang_dex_file_->StringByTypeIdx(dex::TypeIndex(i));
-    const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
+    const dex::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
     ASSERT_TRUE(type_str_id != nullptr);
     dex::StringIndex type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
-    const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
+    const dex::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
     ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
     ASSERT_TRUE(type_id != nullptr);
     EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id).index_, i);
@@ -253,15 +253,15 @@
 
 TEST_F(ArtDexFileLoaderTest, FindProtoId) {
   for (size_t i = 0; i < java_lang_dex_file_->NumProtoIds(); i++) {
-    const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(dex::ProtoIndex(i));
-    const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
+    const dex::ProtoId& to_find = java_lang_dex_file_->GetProtoId(dex::ProtoIndex(i));
+    const dex::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
     std::vector<dex::TypeIndex> to_find_types;
     if (to_find_tl != nullptr) {
       for (size_t j = 0; j < to_find_tl->Size(); j++) {
         to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
       }
     }
-    const DexFile::ProtoId* found =
+    const dex::ProtoId* found =
         java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
     ASSERT_TRUE(found != nullptr);
     EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), dex::ProtoIndex(i));
@@ -270,11 +270,11 @@
 
 TEST_F(ArtDexFileLoaderTest, FindMethodId) {
   for (size_t i = 0; i < java_lang_dex_file_->NumMethodIds(); i++) {
-    const DexFile::MethodId& to_find = java_lang_dex_file_->GetMethodId(i);
-    const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
-    const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
-    const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
-    const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
+    const dex::MethodId& to_find = java_lang_dex_file_->GetMethodId(i);
+    const dex::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
+    const dex::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
+    const dex::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
+    const dex::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
     ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
         << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
         << java_lang_dex_file_->GetStringData(name)
@@ -285,11 +285,11 @@
 
 TEST_F(ArtDexFileLoaderTest, FindFieldId) {
   for (size_t i = 0; i < java_lang_dex_file_->NumFieldIds(); i++) {
-    const DexFile::FieldId& to_find = java_lang_dex_file_->GetFieldId(i);
-    const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
-    const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
-    const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
-    const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
+    const dex::FieldId& to_find = java_lang_dex_file_->GetFieldId(i);
+    const dex::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
+    const dex::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
+    const dex::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
+    const dex::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
     ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
         << java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
         << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
@@ -332,15 +332,15 @@
   std::string error_msg;
   bool success = loader.Open(data_location_path.c_str(),
                              data_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+    ASSERT_NE(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
@@ -360,15 +360,15 @@
   std::string error_msg;
   bool success = loader.Open(system_location_path.c_str(),
                              system_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+    ASSERT_NE(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
@@ -388,15 +388,15 @@
   std::string error_msg;
   bool success = loader.Open(system_framework_location_path.c_str(),
                              system_framework_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_TRUE(dex_file->IsPlatformDexFile());
+    ASSERT_EQ(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
@@ -416,15 +416,15 @@
   std::string error_msg;
   bool success = loader.Open(data_multi_location_path.c_str(),
                              data_multi_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GT(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+    ASSERT_NE(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
@@ -445,15 +445,15 @@
   std::string error_msg;
   bool success = loader.Open(system_multi_location_path.c_str(),
                              system_multi_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GT(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+    ASSERT_NE(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
@@ -474,15 +474,15 @@
   std::string error_msg;
   bool success = loader.Open(system_framework_multi_location_path.c_str(),
                              system_framework_multi_location_path,
-                             /* verify */ false,
-                             /* verify_checksum */ false,
+                             /* verify= */ false,
+                             /* verify_checksum= */ false,
                              &error_msg,
                              &dex_files);
   ASSERT_TRUE(success) << error_msg;
 
   ASSERT_GT(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    ASSERT_TRUE(dex_file->IsPlatformDexFile());
+    ASSERT_EQ(dex_file->GetHiddenapiDomain(), hiddenapi::Domain::kPlatform);
   }
 
   dex_files.clear();
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index 21db2cf..f224a29 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -19,43 +19,72 @@
 
 #include "class_accessor.h"
 
+#include "base/hiddenapi_flags.h"
 #include "base/leb128.h"
+#include "base/utils.h"
 #include "class_iterator.h"
 #include "code_item_accessors-inl.h"
+#include "dex_file.h"
+#include "method_reference.h"
 
 namespace art {
 
 inline ClassAccessor::ClassAccessor(const ClassIteratorData& data)
     : ClassAccessor(data.dex_file_, data.class_def_idx_) {}
 
-inline ClassAccessor::ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def)
-    : ClassAccessor(dex_file, dex_file.GetIndexForClassDef(class_def)) {}
+inline ClassAccessor::ClassAccessor(const DexFile& dex_file,
+                                    const dex::ClassDef& class_def,
+                                    bool parse_hiddenapi_class_data)
+    : ClassAccessor(dex_file,
+                    dex_file.GetClassData(class_def),
+                    dex_file.GetIndexForClassDef(class_def),
+                    parse_hiddenapi_class_data) {}
 
 inline ClassAccessor::ClassAccessor(const DexFile& dex_file, uint32_t class_def_index)
-    : ClassAccessor(dex_file,
-                    dex_file.GetClassData(dex_file.GetClassDef(class_def_index)),
-                    class_def_index) {}
+    : ClassAccessor(dex_file, dex_file.GetClassDef(class_def_index)) {}
 
 inline ClassAccessor::ClassAccessor(const DexFile& dex_file,
                                     const uint8_t* class_data,
-                                    uint32_t class_def_index)
+                                    uint32_t class_def_index,
+                                    bool parse_hiddenapi_class_data)
     : dex_file_(dex_file),
       class_def_index_(class_def_index),
       ptr_pos_(class_data),
+      hiddenapi_ptr_pos_(nullptr),
       num_static_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
       num_instance_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
       num_direct_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
-      num_virtual_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u) {}
+      num_virtual_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u) {
+  if (parse_hiddenapi_class_data && class_def_index != DexFile::kDexNoIndex32) {
+    const dex::HiddenapiClassData* hiddenapi_class_data = dex_file.GetHiddenapiClassData();
+    if (hiddenapi_class_data != nullptr) {
+      hiddenapi_ptr_pos_ = hiddenapi_class_data->GetFlagsPointer(class_def_index);
+    }
+  }
+}
 
 inline void ClassAccessor::Method::Read() {
   index_ += DecodeUnsignedLeb128(&ptr_pos_);
   access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
   code_off_ = DecodeUnsignedLeb128(&ptr_pos_);
+  if (hiddenapi_ptr_pos_ != nullptr) {
+    hiddenapi_flags_ = DecodeUnsignedLeb128(&hiddenapi_ptr_pos_);
+    DCHECK(hiddenapi::ApiList(hiddenapi_flags_).IsValid());
+  }
 }
 
+inline MethodReference ClassAccessor::Method::GetReference() const {
+  return MethodReference(&dex_file_, GetIndex());
+}
+
+
 inline void ClassAccessor::Field::Read() {
   index_ += DecodeUnsignedLeb128(&ptr_pos_);
   access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
+  if (hiddenapi_ptr_pos_ != nullptr) {
+    hiddenapi_flags_ = DecodeUnsignedLeb128(&hiddenapi_ptr_pos_);
+    DCHECK(hiddenapi::ApiList(hiddenapi_flags_).IsValid());
+  }
 }
 
 template <typename DataType, typename Visitor>
@@ -78,12 +107,12 @@
     const InstanceFieldVisitor& instance_field_visitor,
     const DirectMethodVisitor& direct_method_visitor,
     const VirtualMethodVisitor& virtual_method_visitor) const {
-  Field field(dex_file_, ptr_pos_);
+  Field field(dex_file_, ptr_pos_, hiddenapi_ptr_pos_);
   VisitMembers(num_static_fields_, static_field_visitor, &field);
   field.NextSection();
   VisitMembers(num_instance_fields_, instance_field_visitor, &field);
 
-  Method method(dex_file_, field.ptr_pos_, /*is_static_or_direct*/ true);
+  Method method(dex_file_, field.ptr_pos_, field.hiddenapi_ptr_pos_, /*is_static_or_direct*/ true);
   VisitMembers(num_direct_methods_, direct_method_visitor, &method);
   method.NextSection();
   VisitMembers(num_virtual_methods_, virtual_method_visitor, &method);
@@ -109,7 +138,7 @@
                         VoidFunctor());
 }
 
-inline const DexFile::CodeItem* ClassAccessor::GetCodeItem(const Method& method) const {
+inline const dex::CodeItem* ClassAccessor::GetCodeItem(const Method& method) const {
   return dex_file_.GetCodeItem(method.GetCodeItemOffset());
 }
 
@@ -125,25 +154,49 @@
   return dex_file_.StringByTypeIdx(GetClassIdx());
 }
 
-inline const DexFile::CodeItem* ClassAccessor::Method::GetCodeItem() const {
+inline const dex::CodeItem* ClassAccessor::Method::GetCodeItem() const {
   return dex_file_.GetCodeItem(code_off_);
 }
 
 inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>>
     ClassAccessor::GetFieldsInternal(size_t count) const {
-  return { DataIterator<Field>(dex_file_, 0u, num_static_fields_, count, ptr_pos_),
-           DataIterator<Field>(dex_file_, count, num_static_fields_, count, ptr_pos_) };
+  return {
+      DataIterator<Field>(dex_file_,
+                          0u,
+                          num_static_fields_,
+                          count,
+                          ptr_pos_,
+                          hiddenapi_ptr_pos_),
+      DataIterator<Field>(dex_file_,
+                          count,
+                          num_static_fields_,
+                          count,
+                          // The following pointers are bogus but unused in the `end` iterator.
+                          ptr_pos_,
+                          hiddenapi_ptr_pos_) };
 }
 
 // Return an iteration range for the first <count> methods.
 inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Method>>
     ClassAccessor::GetMethodsInternal(size_t count) const {
   // Skip over the fields.
-  Field field(dex_file_, ptr_pos_);
+  Field field(dex_file_, ptr_pos_, hiddenapi_ptr_pos_);
   VisitMembers(NumFields(), VoidFunctor(), &field);
   // Return the iterator pair.
-  return { DataIterator<Method>(dex_file_, 0u, num_direct_methods_, count, field.ptr_pos_),
-           DataIterator<Method>(dex_file_, count, num_direct_methods_, count, field.ptr_pos_) };
+  return {
+      DataIterator<Method>(dex_file_,
+                           0u,
+                           num_direct_methods_,
+                           count,
+                           field.ptr_pos_,
+                           field.hiddenapi_ptr_pos_),
+      DataIterator<Method>(dex_file_,
+                           count,
+                           num_direct_methods_,
+                           count,
+                           // The following pointers are bogus but unused in the `end` iterator.
+                           field.ptr_pos_,
+                           field.hiddenapi_ptr_pos_) };
 }
 
 inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> ClassAccessor::GetFields()
@@ -181,18 +234,14 @@
   return { std::next(methods.begin(), NumDirectMethods()), methods.end() };
 }
 
-inline void ClassAccessor::Field::UnHideAccessFlags() const {
-  DexFile::UnHideAccessFlags(const_cast<uint8_t*>(ptr_pos_), GetAccessFlags(), /*is_method*/ false);
-}
-
-inline void ClassAccessor::Method::UnHideAccessFlags() const {
-  DexFile::UnHideAccessFlags(const_cast<uint8_t*>(ptr_pos_), GetAccessFlags(), /*is_method*/ true);
-}
-
 inline dex::TypeIndex ClassAccessor::GetClassIdx() const {
   return dex_file_.GetClassDef(class_def_index_).class_idx_;
 }
 
+inline const dex::ClassDef& ClassAccessor::GetClassDef() const {
+  return dex_file_.GetClassDef(GetClassDefIndex());
+}
+
 }  // namespace art
 
 #endif  // ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index d40577f..1628256 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -17,17 +17,22 @@
 #ifndef ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
 #define ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
 
-#include "base/utils.h"
 #include "code_item_accessors.h"
-#include "dex_file.h"
-#include "hidden_api_access_flags.h"
+#include "dex_file_types.h"
 #include "invoke_type.h"
-#include "method_reference.h"
 #include "modifiers.h"
 
 namespace art {
 
+namespace dex {
+struct ClassDef;
+struct CodeItem;
+}  // namespace dex
+
 class ClassIteratorData;
+class DexFile;
+template <typename Iter> class IterationRange;
+class MethodReference;
 
 // Classes to access Dex data.
 class ClassAccessor {
@@ -35,22 +40,20 @@
   class BaseItem {
    public:
     explicit BaseItem(const DexFile& dex_file,
-                      const uint8_t* ptr_pos) : dex_file_(dex_file), ptr_pos_(ptr_pos) {}
+                      const uint8_t* ptr_pos,
+                      const uint8_t* hiddenapi_ptr_pos)
+        : dex_file_(dex_file), ptr_pos_(ptr_pos), hiddenapi_ptr_pos_(hiddenapi_ptr_pos) {}
 
     uint32_t GetIndex() const {
       return index_;
     }
 
-    uint32_t GetRawAccessFlags() const {
+    uint32_t GetAccessFlags() const {
       return access_flags_;
     }
 
-    uint32_t GetAccessFlags() const {
-      return HiddenApiAccessFlags::RemoveFromDex(access_flags_);
-    }
-
-    HiddenApiAccessFlags::ApiList DecodeHiddenAccessFlags() const {
-      return HiddenApiAccessFlags::DecodeFromDex(access_flags_);
+    uint32_t GetHiddenapiFlags() const {
+      return hiddenapi_flags_;
     }
 
     bool IsFinal() const {
@@ -66,19 +69,21 @@
     }
 
     bool MemberIsNative() const {
-      return GetRawAccessFlags() & kAccNative;
+      return GetAccessFlags() & kAccNative;
     }
 
     bool MemberIsFinal() const {
-      return GetRawAccessFlags() & kAccFinal;
+      return GetAccessFlags() & kAccFinal;
     }
 
    protected:
     // Internal data pointer for reading.
     const DexFile& dex_file_;
     const uint8_t* ptr_pos_ = nullptr;
+    const uint8_t* hiddenapi_ptr_pos_ = nullptr;
     uint32_t index_ = 0u;
     uint32_t access_flags_ = 0u;
+    uint32_t hiddenapi_flags_ = 0u;
   };
 
   // A decoded version of the method of a class_data_item.
@@ -94,27 +99,24 @@
           : GetVirtualMethodInvokeType(class_access_flags);
     }
 
-    MethodReference GetReference() const {
-      return MethodReference(&dex_file_, GetIndex());
-    }
+    MethodReference GetReference() const;
 
     CodeItemInstructionAccessor GetInstructions() const;
     CodeItemDataAccessor GetInstructionsAndData() const;
 
-    const DexFile::CodeItem* GetCodeItem() const;
+    const dex::CodeItem* GetCodeItem() const;
 
     bool IsStaticOrDirect() const {
       return is_static_or_direct_;
     }
 
-    // Unhide the hidden API access flags at the iterator position. TODO: Deprecate.
-    void UnHideAccessFlags() const;
-
    private:
-    explicit Method(const DexFile& dex_file,
-                    const uint8_t* ptr_pos,
-                    bool is_static_or_direct = true)
-        : BaseItem(dex_file, ptr_pos), is_static_or_direct_(is_static_or_direct) {}
+    Method(const DexFile& dex_file,
+           const uint8_t* ptr_pos,
+           const uint8_t* hiddenapi_ptr_pos = nullptr,
+           bool is_static_or_direct = true)
+        : BaseItem(dex_file, ptr_pos, hiddenapi_ptr_pos),
+          is_static_or_direct_(is_static_or_direct) {}
 
     void Read();
 
@@ -150,16 +152,15 @@
   // A decoded version of the field of a class_data_item.
   class Field : public BaseItem {
    public:
-    explicit Field(const DexFile& dex_file,
-                   const uint8_t* ptr_pos) : BaseItem(dex_file, ptr_pos) {}
+    Field(const DexFile& dex_file,
+          const uint8_t* ptr_pos,
+          const uint8_t* hiddenapi_ptr_pos = nullptr)
+        : BaseItem(dex_file, ptr_pos, hiddenapi_ptr_pos) {}
 
     bool IsStatic() const {
      return is_static_;
     }
 
-    // Unhide the hidden API access flags at the iterator position. TODO: Deprecate.
-    void UnHideAccessFlags() const;
-
    private:
     void Read();
 
@@ -185,8 +186,9 @@
                  uint32_t position,
                  uint32_t partition_pos,
                  uint32_t iterator_end,
-                 const uint8_t* ptr_pos)
-        : data_(dex_file, ptr_pos),
+                 const uint8_t* ptr_pos,
+                 const uint8_t* hiddenapi_ptr_pos)
+        : data_(dex_file, ptr_pos, hiddenapi_ptr_pos),
           position_(position),
           partition_pos_(partition_pos),
           iterator_end_(iterator_end) {
@@ -266,18 +268,21 @@
   };
 
   // Not explicit specifically for range-based loops.
-  ALWAYS_INLINE ClassAccessor(const ClassIteratorData& data);
+  ALWAYS_INLINE ClassAccessor(const ClassIteratorData& data);  // NOLINT [runtime/explicit] [5]
 
-  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file,
+                              const dex::ClassDef& class_def,
+                              bool parse_hiddenapi_class_data = false);
 
   ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, uint32_t class_def_index);
 
   ClassAccessor(const DexFile& dex_file,
                 const uint8_t* class_data,
-                uint32_t class_def_index = DexFile::kDexNoIndex32);
+                uint32_t class_def_index = dex::kDexNoIndex,
+                bool parse_hiddenapi_class_data = false);
 
   // Return the code item for a method.
-  const DexFile::CodeItem* GetCodeItem(const Method& method) const;
+  const dex::CodeItem* GetCodeItem(const Method& method) const;
 
   // Iterator data is not very iterator friendly, use visitors to get around this.
   template <typename StaticFieldVisitor,
@@ -353,13 +358,15 @@
     return ptr_pos_ != nullptr;
   }
 
+  bool HasHiddenapiClassData() const {
+    return hiddenapi_ptr_pos_ != nullptr;
+  }
+
   uint32_t GetClassDefIndex() const {
     return class_def_index_;
   }
 
-  const DexFile::ClassDef& GetClassDef() const {
-    return dex_file_.GetClassDef(GetClassDefIndex());
-  }
+  const dex::ClassDef& GetClassDef() const;
 
  protected:
   // Template visitor to reduce copy paste for visiting elements.
@@ -377,6 +384,7 @@
   const DexFile& dex_file_;
   const uint32_t class_def_index_;
   const uint8_t* ptr_pos_ = nullptr;  // Pointer into stream of class_data_item.
+  const uint8_t* hiddenapi_ptr_pos_ = nullptr;  // Pointer into stream of hiddenapi_metadata.
   const uint32_t num_static_fields_ = 0u;
   const uint32_t num_instance_fields_ = 0u;
   const uint32_t num_direct_methods_ = 0u;
diff --git a/libdexfile/dex/class_accessor_test.cc b/libdexfile/dex/class_accessor_test.cc
index 1f30ae5..9f2ee23 100644
--- a/libdexfile/dex/class_accessor_test.cc
+++ b/libdexfile/dex/class_accessor_test.cc
@@ -30,7 +30,7 @@
     uint32_t class_def_idx = 0u;
     ASSERT_GT(dex_file->NumClassDefs(), 0u);
     for (ClassAccessor accessor : dex_file->GetClasses()) {
-      const DexFile::ClassDef& class_def = dex_file->GetClassDef(accessor.GetClassDefIndex());
+      const dex::ClassDef& class_def = dex_file->GetClassDef(accessor.GetClassDefIndex());
       EXPECT_EQ(accessor.GetDescriptor(), dex_file->StringByTypeIdx(class_def.class_idx_));
       EXPECT_EQ(class_def_idx, accessor.GetClassDefIndex());
       ++class_def_idx;
diff --git a/libdexfile/dex/class_iterator.h b/libdexfile/dex/class_iterator.h
index 477c93b..8ed585b 100644
--- a/libdexfile/dex/class_iterator.h
+++ b/libdexfile/dex/class_iterator.h
@@ -17,7 +17,7 @@
 #ifndef ART_LIBDEXFILE_DEX_CLASS_ITERATOR_H_
 #define ART_LIBDEXFILE_DEX_CLASS_ITERATOR_H_
 
-#include "base/logging.h"
+#include <android-base/logging.h>
 
 namespace art {
 
diff --git a/libdexfile/dex/code_item_accessors-inl.h b/libdexfile/dex/code_item_accessors-inl.h
index c166f5f..632a787 100644
--- a/libdexfile/dex/code_item_accessors-inl.h
+++ b/libdexfile/dex/code_item_accessors-inl.h
@@ -19,6 +19,7 @@
 
 #include "code_item_accessors.h"
 
+#include "base/iteration_range.h"
 #include "compact_dex_file.h"
 #include "dex_file-inl.h"
 #include "standard_dex_file.h"
@@ -32,7 +33,9 @@
   insns_ = insns;
 }
 
-inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemInstructionAccessor::Init<CompactDexFile::CodeItem>(
+    const CompactDexFile::CodeItem& code_item) {
   uint32_t insns_size_in_code_units;
   code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ true>(
       &insns_size_in_code_units,
@@ -43,12 +46,14 @@
   Init(insns_size_in_code_units, code_item.insns_);
 }
 
-inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemInstructionAccessor::Init<StandardDexFile::CodeItem>(
+    const StandardDexFile::CodeItem& code_item) {
   Init(code_item.insns_size_in_code_units_, code_item.insns_);
 }
 
 inline void CodeItemInstructionAccessor::Init(const DexFile& dex_file,
-                                              const DexFile::CodeItem* code_item) {
+                                              const dex::CodeItem* code_item) {
   if (code_item != nullptr) {
     DCHECK(dex_file.IsInDataSection(code_item));
     if (dex_file.IsCompactDexFile()) {
@@ -62,7 +67,7 @@
 
 inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
     const DexFile& dex_file,
-    const DexFile::CodeItem* code_item) {
+    const dex::CodeItem* code_item) {
   Init(dex_file, code_item);
 }
 
@@ -82,7 +87,9 @@
       DexInstructionIterator(insns_, insns_size_in_code_units_) };
 }
 
-inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemDataAccessor::Init<CompactDexFile::CodeItem>(
+    const CompactDexFile::CodeItem& code_item) {
   uint32_t insns_size_in_code_units;
   code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ false>(&insns_size_in_code_units,
                                                                 &registers_size_,
@@ -92,7 +99,9 @@
   CodeItemInstructionAccessor::Init(insns_size_in_code_units, code_item.insns_);
 }
 
-inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemDataAccessor::Init<StandardDexFile::CodeItem>(
+    const StandardDexFile::CodeItem& code_item) {
   CodeItemInstructionAccessor::Init(code_item);
   registers_size_ = code_item.registers_size_;
   ins_size_ = code_item.ins_size_;
@@ -101,24 +110,24 @@
 }
 
 inline void CodeItemDataAccessor::Init(const DexFile& dex_file,
-                                       const DexFile::CodeItem* code_item) {
+                                       const dex::CodeItem* code_item) {
   if (code_item != nullptr) {
     if (dex_file.IsCompactDexFile()) {
-      CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+      Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
     } else {
       DCHECK(dex_file.IsStandardDexFile());
-      CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+      Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
     }
   }
 }
 
 inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile& dex_file,
-                                                  const DexFile::CodeItem* code_item) {
+                                                  const dex::CodeItem* code_item) {
   Init(dex_file, code_item);
 }
 
-inline IterationRange<const DexFile::TryItem*> CodeItemDataAccessor::TryItems() const {
-  const DexFile::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
+inline IterationRange<const dex::TryItem*> CodeItemDataAccessor::TryItems() const {
+  const dex::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
   return {
     try_items,
     try_items + TriesSize() };
@@ -128,8 +137,8 @@
   return DexFile::GetCatchHandlerData(end(), TriesSize(), offset);
 }
 
-inline const DexFile::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
-  IterationRange<const DexFile::TryItem*> try_items(TryItems());
+inline const dex::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
+  IterationRange<const dex::TryItem*> try_items(TryItems());
   int32_t index = DexFile::FindTryItem(try_items.begin(),
                                        try_items.end() - try_items.begin(),
                                        try_dex_pc);
@@ -157,8 +166,25 @@
   return reinterpret_cast<const void*>(handler_data);
 }
 
+template <>
+inline void CodeItemDebugInfoAccessor::Init<CompactDexFile::CodeItem>(
+    const CompactDexFile::CodeItem& code_item,
+    uint32_t dex_method_index) {
+  debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
+      dex_method_index);
+  CodeItemDataAccessor::Init(code_item);
+}
+
+template <>
+inline void CodeItemDebugInfoAccessor::Init<StandardDexFile::CodeItem>(
+    const StandardDexFile::CodeItem& code_item,
+    uint32_t dex_method_index ATTRIBUTE_UNUSED) {
+  debug_info_offset_ = code_item.debug_info_off_;
+  CodeItemDataAccessor::Init(code_item);
+}
+
 inline void CodeItemDebugInfoAccessor::Init(const DexFile& dex_file,
-                                            const DexFile::CodeItem* code_item,
+                                            const dex::CodeItem* code_item,
                                             uint32_t dex_method_index) {
   if (code_item == nullptr) {
     return;
@@ -168,35 +194,52 @@
     Init(down_cast<const CompactDexFile::CodeItem&>(*code_item), dex_method_index);
   } else {
     DCHECK(dex_file.IsStandardDexFile());
-    Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+    Init(down_cast<const StandardDexFile::CodeItem&>(*code_item), dex_method_index);
   }
 }
 
-inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item,
-                                            uint32_t dex_method_index) {
-  debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
-      dex_method_index);
-  CodeItemDataAccessor::Init(code_item);
-}
-
-inline void CodeItemDebugInfoAccessor::Init(const StandardDexFile::CodeItem& code_item) {
-  debug_info_offset_ = code_item.debug_info_off_;
-  CodeItemDataAccessor::Init(code_item);
-}
-
-template<typename NewLocalCallback>
-inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(bool is_static,
-                                                            uint32_t method_idx,
-                                                            NewLocalCallback new_local,
-                                                            void* context) const {
+template<typename NewLocalVisitor>
+inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(
+    bool is_static,
+    uint32_t method_idx,
+    const NewLocalVisitor& new_local) const {
   return dex_file_->DecodeDebugLocalInfo(RegistersSize(),
                                          InsSize(),
                                          InsnsSizeInCodeUnits(),
                                          DebugInfoOffset(),
                                          is_static,
                                          method_idx,
-                                         new_local,
-                                         context);
+                                         new_local);
+}
+
+template <typename Visitor>
+inline uint32_t CodeItemDebugInfoAccessor::VisitParameterNames(const Visitor& visitor) const {
+  const uint8_t* stream = dex_file_->GetDebugInfoStream(DebugInfoOffset());
+  return (stream != nullptr) ? DexFile::DecodeDebugInfoParameterNames(&stream, visitor) : 0u;
+}
+
+inline bool CodeItemDebugInfoAccessor::GetLineNumForPc(const uint32_t address,
+                                                       uint32_t* line_num) const {
+  return DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+    // We know that this callback will be called in ascending address order, so keep going until we
+    // find a match or we've just gone past it.
+    if (entry.address_ > address) {
+      // The line number from the previous positions callback will be the final result.
+      return true;
+    }
+    *line_num = entry.line_;
+    return entry.address_ == address;
+  });
+}
+
+template <typename Visitor>
+inline bool CodeItemDebugInfoAccessor::DecodeDebugPositionInfo(const Visitor& visitor) const {
+  return dex_file_->DecodeDebugPositionInfo(
+      dex_file_->GetDebugInfoStream(DebugInfoOffset()),
+      [this](uint32_t idx) {
+        return dex_file_->StringDataByIdx(dex::StringIndex(idx));
+      },
+      visitor);
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 695cc7b..794f234 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -19,21 +19,28 @@
 #ifndef ART_LIBDEXFILE_DEX_CODE_ITEM_ACCESSORS_H_
 #define ART_LIBDEXFILE_DEX_CODE_ITEM_ACCESSORS_H_
 
-#include "compact_dex_file.h"
-#include "dex_file.h"
+#include <android-base/logging.h>
+
 #include "dex_instruction_iterator.h"
-#include "standard_dex_file.h"
 
 namespace art {
 
+namespace dex {
+struct CodeItem;
+struct TryItem;
+}  // namespace dex
+
 class ArtMethod;
+class DexFile;
+template <typename Iter>
+class IterationRange;
 
 // Abstracts accesses to the instruction fields of code items for CompactDexFile and
 // StandardDexFile.
 class CodeItemInstructionAccessor {
  public:
   ALWAYS_INLINE CodeItemInstructionAccessor(const DexFile& dex_file,
-                                            const DexFile::CodeItem* code_item);
+                                            const dex::CodeItem* code_item);
 
   ALWAYS_INLINE explicit CodeItemInstructionAccessor(ArtMethod* method);
 
@@ -71,9 +78,10 @@
   CodeItemInstructionAccessor() = default;
 
   ALWAYS_INLINE void Init(uint32_t insns_size_in_code_units, const uint16_t* insns);
-  ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
-  ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
-  ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+  ALWAYS_INLINE void Init(const DexFile& dex_file, const dex::CodeItem* code_item);
+
+  template <typename DexFileCodeItemType>
+  ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item);
 
  private:
   // size of the insns array, in 2 byte code units. 0 if there is no code item.
@@ -87,7 +95,7 @@
 // StandardDexFile.
 class CodeItemDataAccessor : public CodeItemInstructionAccessor {
  public:
-  ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+  ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const dex::CodeItem* code_item);
 
   uint16_t RegistersSize() const {
     return registers_size_;
@@ -105,20 +113,21 @@
     return tries_size_;
   }
 
-  IterationRange<const DexFile::TryItem*> TryItems() const;
+  IterationRange<const dex::TryItem*> TryItems() const;
 
   const uint8_t* GetCatchHandlerData(size_t offset = 0) const;
 
-  const DexFile::TryItem* FindTryItem(uint32_t try_dex_pc) const;
+  const dex::TryItem* FindTryItem(uint32_t try_dex_pc) const;
 
   inline const void* CodeItemDataEnd() const;
 
  protected:
   CodeItemDataAccessor() = default;
 
-  ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
-  ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
-  ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+  ALWAYS_INLINE void Init(const DexFile& dex_file, const dex::CodeItem* code_item);
+
+  template <typename DexFileCodeItemType>
+  ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item);
 
  private:
   // Fields mirrored from the dex/cdex code item.
@@ -136,13 +145,13 @@
 
   // Initialize with an existing offset.
   ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile& dex_file,
-                                          const DexFile::CodeItem* code_item,
+                                          const dex::CodeItem* code_item,
                                           uint32_t dex_method_index) {
     Init(dex_file, code_item, dex_method_index);
   }
 
   ALWAYS_INLINE void Init(const DexFile& dex_file,
-                          const DexFile::CodeItem* code_item,
+                          const dex::CodeItem* code_item,
                           uint32_t dex_method_index);
 
   ALWAYS_INLINE explicit CodeItemDebugInfoAccessor(ArtMethod* method);
@@ -151,15 +160,24 @@
     return debug_info_offset_;
   }
 
-  template<typename NewLocalCallback>
+  template<typename NewLocalVisitor>
   bool DecodeDebugLocalInfo(bool is_static,
                             uint32_t method_idx,
-                            NewLocalCallback new_local,
-                            void* context) const;
+                            const NewLocalVisitor& new_local) const;
+
+  // Visit each parameter in the debug information. Returns the line number.
+  // The argument of the Visitor is dex::StringIndex.
+  template <typename Visitor>
+  uint32_t VisitParameterNames(const Visitor& visitor) const;
+
+  template <typename Visitor>
+  bool DecodeDebugPositionInfo(const Visitor& visitor) const;
+
+  bool GetLineNumForPc(const uint32_t pc, uint32_t* line_num) const;
 
  protected:
-  ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item, uint32_t dex_method_index);
-  ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
+  template <typename DexFileCodeItemType>
+  ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item, uint32_t dex_method_index);
 
  private:
   const DexFile* dex_file_ = nullptr;
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index 2bb4dde..c5891f9 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -45,10 +45,10 @@
   std::unique_ptr<const DexFile> dex(dex_file_loader.Open(data->data(),
                                                           data->size(),
                                                           "location",
-                                                          /*location_checksum*/ 123,
-                                                          /*oat_dex_file*/nullptr,
-                                                          /*verify*/false,
-                                                          /*verify_checksum*/false,
+                                                          /*location_checksum=*/ 123,
+                                                          /*oat_dex_file=*/nullptr,
+                                                          /*verify=*/false,
+                                                          /*verify_checksum=*/false,
                                                           &error_msg));
   CHECK(dex != nullptr) << error_msg;
   return dex;
@@ -56,11 +56,11 @@
 
 TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
   std::vector<uint8_t> standard_dex_data;
-  std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex*/false,
+  std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex=*/false,
                                                             &standard_dex_data));
   ASSERT_TRUE(standard_dex != nullptr);
   std::vector<uint8_t> compact_dex_data;
-  std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true,
+  std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex=*/true,
                                                            &compact_dex_data));
   ASSERT_TRUE(compact_dex != nullptr);
   static constexpr uint16_t kRegisterSize = 2;
@@ -71,7 +71,7 @@
   static constexpr size_t kInsnsSizeInCodeUnits = 5;
 
   auto verify_code_item = [&](const DexFile* dex,
-                              const DexFile::CodeItem* item,
+                              const dex::CodeItem* item,
                               const uint16_t* insns) {
     CodeItemInstructionAccessor insns_accessor(*dex, item);
     EXPECT_TRUE(insns_accessor.HasCodeItem());
diff --git a/libdexfile/dex/compact_dex_file.cc b/libdexfile/dex/compact_dex_file.cc
index 302b59e..a5044aa 100644
--- a/libdexfile/dex/compact_dex_file.cc
+++ b/libdexfile/dex/compact_dex_file.cc
@@ -55,7 +55,7 @@
       static_cast<uint32_t>(FeatureFlags::kDefaultMethods)) != 0;
 }
 
-uint32_t CompactDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
+uint32_t CompactDexFile::GetCodeItemSize(const dex::CodeItem& item) const {
   DCHECK(IsInDataSection(&item));
   return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
       reinterpret_cast<uintptr_t>(&item);
@@ -100,7 +100,7 @@
               location_checksum,
               oat_dex_file,
               std::move(container),
-              /*is_compact_dex*/ true),
+              /*is_compact_dex=*/ true),
       debug_info_offsets_(DataBegin() + GetHeader().debug_info_offsets_pos_,
                           GetHeader().debug_info_base_,
                           GetHeader().debug_info_offsets_table_offset_) {}
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index 8eade6d..47edd51 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -84,7 +84,7 @@
   // Like the standard code item except without a debug info offset. Each code item may have a
   // preheader to encode large methods. In 99% of cases, the preheader is not used. This enables
   // smaller size with a good fast path case in the accessors.
-  struct CodeItem : public DexFile::CodeItem {
+  struct CodeItem : public dex::CodeItem {
     static constexpr size_t kAlignment = sizeof(uint16_t);
     // Max preheader size in uint16_ts.
     static constexpr size_t kMaxPreHeaderSize = 6;
@@ -271,7 +271,7 @@
 
   bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
+  uint32_t GetCodeItemSize(const dex::CodeItem& item) const override;
 
   uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
     return debug_info_offsets_.GetOffset(dex_method_index);
diff --git a/libdexfile/dex/compact_dex_file_test.cc b/libdexfile/dex/compact_dex_file_test.cc
index 517c587..799967e 100644
--- a/libdexfile/dex/compact_dex_file_test.cc
+++ b/libdexfile/dex/compact_dex_file_test.cc
@@ -68,11 +68,11 @@
     uint16_t out_outs_size;
     uint16_t out_tries_size;
     uint32_t out_insns_size_in_code_units;
-    code_item->DecodeFields</*kDecodeOnlyInstructionCount*/false>(&out_insns_size_in_code_units,
-                                                                  &out_registers_size,
-                                                                  &out_ins_size,
-                                                                  &out_outs_size,
-                                                                  &out_tries_size);
+    code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/false>(&out_insns_size_in_code_units,
+                                                                   &out_registers_size,
+                                                                   &out_ins_size,
+                                                                   &out_outs_size,
+                                                                   &out_tries_size);
     ASSERT_EQ(registers_size, out_registers_size);
     ASSERT_EQ(ins_size, out_ins_size);
     ASSERT_EQ(outs_size, out_outs_size);
@@ -80,11 +80,11 @@
     ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
 
     ++out_insns_size_in_code_units;  // Force value to change.
-    code_item->DecodeFields</*kDecodeOnlyInstructionCount*/true>(&out_insns_size_in_code_units,
-                                                                 /*registers_size*/ nullptr,
-                                                                 /*ins_size*/ nullptr,
-                                                                 /*outs_size*/ nullptr,
-                                                                 /*tries_size*/ nullptr);
+    code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/true>(&out_insns_size_in_code_units,
+                                                                  /*registers_size=*/ nullptr,
+                                                                  /*ins_size=*/ nullptr,
+                                                                  /*outs_size=*/ nullptr,
+                                                                  /*tries_size=*/ nullptr);
     ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
   };
   static constexpr uint32_t kMax32 = std::numeric_limits<uint32_t>::max();
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index c512361..2af1e04 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -20,8 +20,10 @@
 #include "dex_file.h"
 
 #include "base/casts.h"
+#include "base/iteration_range.h"
 #include "base/leb128.h"
 #include "base/stringpiece.h"
+#include "base/utils.h"
 #include "class_iterator.h"
 #include "compact_dex_file.h"
 #include "dex_instruction_iterator.h"
@@ -30,12 +32,12 @@
 
 namespace art {
 
-inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
+inline int32_t DexFile::GetStringLength(const dex::StringId& string_id) const {
   const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
   return DecodeUnsignedLeb128(&ptr);
 }
 
-inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
+inline const char* DexFile::GetStringDataAndUtf16Length(const dex::StringId& string_id,
                                                         uint32_t* utf16_length) const {
   DCHECK(utf16_length != nullptr) << GetLocation();
   const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
@@ -43,7 +45,7 @@
   return reinterpret_cast<const char*>(ptr);
 }
 
-inline const char* DexFile::GetStringData(const StringId& string_id) const {
+inline const char* DexFile::GetStringData(const dex::StringId& string_id) const {
   uint32_t ignored;
   return GetStringDataAndUtf16Length(string_id, &ignored);
 }
@@ -54,7 +56,7 @@
     *utf16_length = 0;
     return nullptr;
   }
-  const StringId& string_id = GetStringId(idx);
+  const dex::StringId& string_id = GetStringId(idx);
   return GetStringDataAndUtf16Length(string_id, utf16_length);
 }
 
@@ -67,7 +69,7 @@
   if (!idx.IsValid()) {
     return nullptr;
   }
-  const TypeId& type_id = GetTypeId(idx);
+  const dex::TypeId& type_id = GetTypeId(idx);
   return StringDataAndUtf16LengthByIdx(type_id.descriptor_idx_, unicode_length);
 }
 
@@ -75,74 +77,86 @@
   if (!idx.IsValid()) {
     return nullptr;
   }
-  const TypeId& type_id = GetTypeId(idx);
+  const dex::TypeId& type_id = GetTypeId(idx);
   return StringDataByIdx(type_id.descriptor_idx_);
 }
 
-inline const char* DexFile::GetTypeDescriptor(const TypeId& type_id) const {
+inline const char* DexFile::GetTypeDescriptor(const dex::TypeId& type_id) const {
   return StringDataByIdx(type_id.descriptor_idx_);
 }
 
-inline const char* DexFile::GetFieldTypeDescriptor(const FieldId& field_id) const {
-  const DexFile::TypeId& type_id = GetTypeId(field_id.type_idx_);
+inline const char* DexFile::GetFieldTypeDescriptor(const dex::FieldId& field_id) const {
+  const dex::TypeId& type_id = GetTypeId(field_id.type_idx_);
   return GetTypeDescriptor(type_id);
 }
 
-inline const char* DexFile::GetFieldName(const FieldId& field_id) const {
+inline const char* DexFile::GetFieldName(const dex::FieldId& field_id) const {
   return StringDataByIdx(field_id.name_idx_);
 }
 
-inline const char* DexFile::GetMethodDeclaringClassDescriptor(const MethodId& method_id) const {
-  const DexFile::TypeId& type_id = GetTypeId(method_id.class_idx_);
+inline const char* DexFile::GetMethodDeclaringClassDescriptor(const dex::MethodId& method_id)
+    const {
+  const dex::TypeId& type_id = GetTypeId(method_id.class_idx_);
   return GetTypeDescriptor(type_id);
 }
 
-inline const Signature DexFile::GetMethodSignature(const MethodId& method_id) const {
+inline const Signature DexFile::GetMethodSignature(const dex::MethodId& method_id) const {
   return Signature(this, GetProtoId(method_id.proto_idx_));
 }
 
-inline const Signature DexFile::GetProtoSignature(const ProtoId& proto_id) const {
+inline const Signature DexFile::GetProtoSignature(const dex::ProtoId& proto_id) const {
   return Signature(this, proto_id);
 }
 
-inline const char* DexFile::GetMethodName(const MethodId& method_id) const {
+inline const char* DexFile::GetMethodName(const dex::MethodId& method_id) const {
   return StringDataByIdx(method_id.name_idx_);
 }
 
+inline const char* DexFile::GetMethodName(const dex::MethodId& method_id, uint32_t* utf_length)
+    const {
+  return StringDataAndUtf16LengthByIdx(method_id.name_idx_, utf_length);
+}
+
+inline const char* DexFile::GetMethodName(uint32_t idx, uint32_t* utf_length) const {
+  return StringDataAndUtf16LengthByIdx(GetMethodId(idx).name_idx_, utf_length);
+}
+
 inline const char* DexFile::GetMethodShorty(uint32_t idx) const {
   return StringDataByIdx(GetProtoId(GetMethodId(idx).proto_idx_).shorty_idx_);
 }
 
-inline const char* DexFile::GetMethodShorty(const MethodId& method_id) const {
+inline const char* DexFile::GetMethodShorty(const dex::MethodId& method_id) const {
   return StringDataByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_);
 }
 
-inline const char* DexFile::GetMethodShorty(const MethodId& method_id, uint32_t* length) const {
+inline const char* DexFile::GetMethodShorty(const dex::MethodId& method_id, uint32_t* length)
+    const {
   // Using the UTF16 length is safe here as shorties are guaranteed to be ASCII characters.
   return StringDataAndUtf16LengthByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_, length);
 }
 
-inline const char* DexFile::GetClassDescriptor(const ClassDef& class_def) const {
+inline const char* DexFile::GetClassDescriptor(const dex::ClassDef& class_def) const {
   return StringByTypeIdx(class_def.class_idx_);
 }
 
-inline const char* DexFile::GetReturnTypeDescriptor(const ProtoId& proto_id) const {
+inline const char* DexFile::GetReturnTypeDescriptor(const dex::ProtoId& proto_id) const {
   return StringByTypeIdx(proto_id.return_type_idx_);
 }
 
 inline const char* DexFile::GetShorty(dex::ProtoIndex proto_idx) const {
-  const ProtoId& proto_id = GetProtoId(proto_idx);
+  const dex::ProtoId& proto_id = GetProtoId(proto_idx);
   return StringDataByIdx(proto_id.shorty_idx_);
 }
 
-inline const DexFile::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
-                                                    uint32_t offset) {
-  return reinterpret_cast<const TryItem*>
-      (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), TryItem::kAlignment)) + offset;
+inline const dex::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
+                                                uint32_t offset) {
+  return reinterpret_cast<const dex::TryItem*>
+      (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), dex::TryItem::kAlignment)) +
+          offset;
 }
 
-static inline bool DexFileStringEquals(const DexFile* df1, dex::StringIndex sidx1,
-                                       const DexFile* df2, dex::StringIndex sidx2) {
+inline bool DexFile::StringEquals(const DexFile* df1, dex::StringIndex sidx1,
+                                  const DexFile* df2, dex::StringIndex sidx2) {
   uint32_t s1_len;  // Note: utf16 length != mutf8 length.
   const char* s1_data = df1->StringDataAndUtf16LengthByIdx(sidx1, &s1_len);
   uint32_t s2_len;
@@ -150,60 +164,6 @@
   return (s1_len == s2_len) && (strcmp(s1_data, s2_data) == 0);
 }
 
-inline bool Signature::operator==(const Signature& rhs) const {
-  if (dex_file_ == nullptr) {
-    return rhs.dex_file_ == nullptr;
-  }
-  if (rhs.dex_file_ == nullptr) {
-    return false;
-  }
-  if (dex_file_ == rhs.dex_file_) {
-    return proto_id_ == rhs.proto_id_;
-  }
-  uint32_t lhs_shorty_len;  // For a shorty utf16 length == mutf8 length.
-  const char* lhs_shorty_data = dex_file_->StringDataAndUtf16LengthByIdx(proto_id_->shorty_idx_,
-                                                                         &lhs_shorty_len);
-  StringPiece lhs_shorty(lhs_shorty_data, lhs_shorty_len);
-  {
-    uint32_t rhs_shorty_len;
-    const char* rhs_shorty_data =
-        rhs.dex_file_->StringDataAndUtf16LengthByIdx(rhs.proto_id_->shorty_idx_,
-                                                     &rhs_shorty_len);
-    StringPiece rhs_shorty(rhs_shorty_data, rhs_shorty_len);
-    if (lhs_shorty != rhs_shorty) {
-      return false;  // Shorty mismatch.
-    }
-  }
-  if (lhs_shorty[0] == 'L') {
-    const DexFile::TypeId& return_type_id = dex_file_->GetTypeId(proto_id_->return_type_idx_);
-    const DexFile::TypeId& rhs_return_type_id =
-        rhs.dex_file_->GetTypeId(rhs.proto_id_->return_type_idx_);
-    if (!DexFileStringEquals(dex_file_, return_type_id.descriptor_idx_,
-                             rhs.dex_file_, rhs_return_type_id.descriptor_idx_)) {
-      return false;  // Return type mismatch.
-    }
-  }
-  if (lhs_shorty.find('L', 1) != StringPiece::npos) {
-    const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
-    const DexFile::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
-    // We found a reference parameter in the matching shorty, so both lists must be non-empty.
-    DCHECK(params != nullptr);
-    DCHECK(rhs_params != nullptr);
-    uint32_t params_size = params->Size();
-    DCHECK_EQ(params_size, rhs_params->Size());  // Parameter list size must match.
-    for (uint32_t i = 0; i < params_size; ++i) {
-      const DexFile::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
-      const DexFile::TypeId& rhs_param_id =
-          rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
-      if (!DexFileStringEquals(dex_file_, param_id.descriptor_idx_,
-                               rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
-        return false;  // Parameter type mismatch.
-      }
-    }
-  }
-  return true;
-}
-
 template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
 bool DexFile::DecodeDebugLocalInfo(const uint8_t* stream,
                                    const std::string& location,
@@ -214,10 +174,9 @@
                                    uint16_t registers_size,
                                    uint16_t ins_size,
                                    uint16_t insns_size_in_code_units,
-                                   IndexToStringData index_to_string_data,
-                                   TypeIndexToStringData type_index_to_string_data,
-                                   NewLocalCallback new_local_callback,
-                                   void* context) {
+                                   const IndexToStringData& index_to_string_data,
+                                   const TypeIndexToStringData& type_index_to_string_data,
+                                   const NewLocalCallback& new_local_callback) {
   if (stream == nullptr) {
     return false;
   }
@@ -277,7 +236,7 @@
         for (uint16_t reg = 0; reg < registers_size; reg++) {
           if (local_in_reg[reg].is_live_) {
             local_in_reg[reg].end_address_ = insns_size_in_code_units;
-            new_local_callback(context, local_in_reg[reg]);
+            new_local_callback(local_in_reg[reg]);
           }
         }
         return true;
@@ -306,7 +265,7 @@
         // Emit what was previously there, if anything
         if (local_in_reg[reg].is_live_) {
           local_in_reg[reg].end_address_ = address;
-          new_local_callback(context, local_in_reg[reg]);
+          new_local_callback(local_in_reg[reg]);
         }
 
         local_in_reg[reg].name_ = index_to_string_data(name_idx);
@@ -328,7 +287,7 @@
         // closed register is sloppy, but harmless if no further action is taken.
         if (local_in_reg[reg].is_live_) {
           local_in_reg[reg].end_address_ = address;
-          new_local_callback(context, local_in_reg[reg]);
+          new_local_callback(local_in_reg[reg]);
           local_in_reg[reg].is_live_ = false;
         }
         break;
@@ -368,8 +327,7 @@
                                    uint32_t debug_info_offset,
                                    bool is_static,
                                    uint32_t method_idx,
-                                   NewLocalCallback new_local_callback,
-                                   void* context) const {
+                                   const NewLocalCallback& new_local_callback) const {
   const uint8_t* const stream = GetDebugInfoStream(debug_info_offset);
   if (stream == nullptr) {
     return false;
@@ -395,25 +353,19 @@
                                 return StringByTypeIdx(dex::TypeIndex(
                                     dchecked_integral_cast<uint16_t>(idx)));
                               },
-                              new_local_callback,
-                              context);
+                              new_local_callback);
 }
 
 template<typename DexDebugNewPosition, typename IndexToStringData>
 bool DexFile::DecodeDebugPositionInfo(const uint8_t* stream,
-                                      IndexToStringData index_to_string_data,
-                                      DexDebugNewPosition position_functor,
-                                      void* context) {
+                                      const IndexToStringData& index_to_string_data,
+                                      const DexDebugNewPosition& position_functor) {
   if (stream == nullptr) {
     return false;
   }
 
-  PositionInfo entry = PositionInfo();
-  entry.line_ = DecodeUnsignedLeb128(&stream);
-  uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
-  for (uint32_t i = 0; i < parameters_size; ++i) {
-    DecodeUnsignedLeb128P1(&stream);  // Parameter name.
-  }
+  PositionInfo entry;
+  entry.line_ = DecodeDebugInfoParameterNames(&stream, VoidFunctor());
 
   for (;;)  {
     uint8_t opcode = *stream++;
@@ -456,7 +408,7 @@
         int adjopcode = opcode - DBG_FIRST_SPECIAL;
         entry.address_ += adjopcode / DBG_LINE_RANGE;
         entry.line_ += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
-        if (position_functor(context, entry)) {
+        if (position_functor(entry)) {
           return true;  // early exit.
         }
         entry.prologue_end_ = false;
@@ -467,18 +419,6 @@
   }
 }
 
-template<typename DexDebugNewPosition>
-bool DexFile::DecodeDebugPositionInfo(uint32_t debug_info_offset,
-                                      DexDebugNewPosition position_functor,
-                                      void* context) const {
-  return DecodeDebugPositionInfo(GetDebugInfoStream(debug_info_offset),
-                                 [this](uint32_t idx) {
-                                   return StringDataByIdx(dex::StringIndex(idx));
-                                 },
-                                 position_functor,
-                                 context);
-}
-
 inline const CompactDexFile* DexFile::AsCompactDexFile() const {
   DCHECK(IsCompactDexFile());
   return down_cast<const CompactDexFile*>(this);
@@ -502,6 +442,18 @@
   return { ClassIterator(*this, 0u), ClassIterator(*this, NumClassDefs()) };
 }
 
+// Returns the line number
+template <typename Visitor>
+inline uint32_t DexFile::DecodeDebugInfoParameterNames(const uint8_t** debug_info,
+                                                       const Visitor& visitor) {
+  uint32_t line = DecodeUnsignedLeb128(debug_info);
+  const uint32_t parameters_size = DecodeUnsignedLeb128(debug_info);
+  for (uint32_t i = 0; i < parameters_size; ++i) {
+    visitor(dex::StringIndex(DecodeUnsignedLeb128P1(debug_info)));
+  }
+  return line;
+}
+
 }  // namespace art
 
 #endif  // ART_LIBDEXFILE_DEX_DEX_FILE_INL_H_
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index a2198b7..39377a3 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -23,6 +23,7 @@
 #include <zlib.h>
 
 #include <memory>
+#include <ostream>
 #include <sstream>
 #include <type_traits>
 
@@ -41,25 +42,24 @@
 
 using android::base::StringPrintf;
 
+using dex::CallSiteIdItem;
+using dex::ClassDef;
+using dex::FieldId;
+using dex::MapList;
+using dex::MapItem;
+using dex::MethodHandleItem;
+using dex::MethodId;
+using dex::ProtoId;
+using dex::StringId;
+using dex::TryItem;
+using dex::TypeId;
+using dex::TypeList;
+
 static_assert(sizeof(dex::StringIndex) == sizeof(uint32_t), "StringIndex size is wrong");
 static_assert(std::is_trivially_copyable<dex::StringIndex>::value, "StringIndex not trivial");
 static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong");
 static_assert(std::is_trivially_copyable<dex::TypeIndex>::value, "TypeIndex not trivial");
 
-void DexFile::UnHideAccessFlags(uint8_t* data_ptr,
-                                uint32_t new_access_flags,
-                                bool is_method) {
-  // Go back 1 uleb to start.
-  data_ptr = ReverseSearchUnsignedLeb128(data_ptr);
-  if (is_method) {
-    // Methods have another uleb field before the access flags
-    data_ptr = ReverseSearchUnsignedLeb128(data_ptr);
-  }
-  DCHECK_EQ(HiddenApiAccessFlags::RemoveFromDex(DecodeUnsignedLeb128WithoutMovingCursor(data_ptr)),
-            new_access_flags);
-  UpdateUnsignedLeb128(data_ptr, new_access_flags);
-}
-
 uint32_t DexFile::CalculateChecksum() const {
   return CalculateChecksum(Begin(), Size());
 }
@@ -119,10 +119,11 @@
       num_method_handles_(0),
       call_site_ids_(nullptr),
       num_call_site_ids_(0),
+      hiddenapi_class_data_(nullptr),
       oat_dex_file_(oat_dex_file),
       container_(std::move(container)),
       is_compact_dex_(is_compact_dex),
-      is_platform_dex_(false) {
+      hiddenapi_domain_(hiddenapi::Domain::kApplication) {
   CHECK(begin_ != nullptr) << GetLocation();
   CHECK_GT(size_, 0U) << GetLocation();
   // Check base (=header) alignment.
@@ -194,6 +195,11 @@
     } else if (map_item.type_ == kDexTypeCallSiteIdItem) {
       call_site_ids_ = reinterpret_cast<const CallSiteIdItem*>(Begin() + map_item.offset_);
       num_call_site_ids_ = map_item.size_;
+    } else if (map_item.type_ == kDexTypeHiddenapiClassData) {
+      hiddenapi_class_data_ = GetHiddenapiClassDataAtOffset(map_item.offset_);
+    } else {
+      // Pointers to other sections are not necessary to retain in the DexFile struct.
+      // Other items have pointers directly into their data.
     }
   }
 }
@@ -203,7 +209,7 @@
   return atoi(version);
 }
 
-const DexFile::ClassDef* DexFile::FindClassDef(dex::TypeIndex type_idx) const {
+const ClassDef* DexFile::FindClassDef(dex::TypeIndex type_idx) const {
   size_t num_class_defs = NumClassDefs();
   // Fast path for rare no class defs case.
   if (num_class_defs == 0) {
@@ -218,8 +224,7 @@
   return nullptr;
 }
 
-uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
-                                     uint32_t method_idx) const {
+uint32_t DexFile::FindCodeItemOffset(const ClassDef& class_def, uint32_t method_idx) const {
   ClassAccessor accessor(*this, class_def);
   CHECK(accessor.HasClassData());
   for (const ClassAccessor::Method& method : accessor.GetMethods()) {
@@ -231,9 +236,9 @@
   UNREACHABLE();
 }
 
-const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
-                                             const DexFile::StringId& name,
-                                             const DexFile::TypeId& type) const {
+const FieldId* DexFile::FindFieldId(const TypeId& declaring_klass,
+                                    const StringId& name,
+                                    const TypeId& type) const {
   // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
   const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
   const dex::StringIndex name_idx = GetIndexForStringId(name);
@@ -242,7 +247,7 @@
   int32_t hi = NumFieldIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::FieldId& field = GetFieldId(mid);
+    const FieldId& field = GetFieldId(mid);
     if (class_idx > field.class_idx_) {
       lo = mid + 1;
     } else if (class_idx < field.class_idx_) {
@@ -266,9 +271,9 @@
   return nullptr;
 }
 
-const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
-                                               const DexFile::StringId& name,
-                                               const DexFile::ProtoId& signature) const {
+const MethodId* DexFile::FindMethodId(const TypeId& declaring_klass,
+                                      const StringId& name,
+                                      const ProtoId& signature) const {
   // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
   const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
   const dex::StringIndex name_idx = GetIndexForStringId(name);
@@ -277,7 +282,7 @@
   int32_t hi = NumMethodIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::MethodId& method = GetMethodId(mid);
+    const MethodId& method = GetMethodId(mid);
     if (class_idx > method.class_idx_) {
       lo = mid + 1;
     } else if (class_idx < method.class_idx_) {
@@ -301,12 +306,12 @@
   return nullptr;
 }
 
-const DexFile::StringId* DexFile::FindStringId(const char* string) const {
+const StringId* DexFile::FindStringId(const char* string) const {
   int32_t lo = 0;
   int32_t hi = NumStringIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::StringId& str_id = GetStringId(dex::StringIndex(mid));
+    const StringId& str_id = GetStringId(dex::StringIndex(mid));
     const char* str = GetStringData(str_id);
     int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
     if (compare > 0) {
@@ -320,13 +325,13 @@
   return nullptr;
 }
 
-const DexFile::TypeId* DexFile::FindTypeId(const char* string) const {
+const TypeId* DexFile::FindTypeId(const char* string) const {
   int32_t lo = 0;
   int32_t hi = NumTypeIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
     const TypeId& type_id = GetTypeId(dex::TypeIndex(mid));
-    const DexFile::StringId& str_id = GetStringId(type_id.descriptor_idx_);
+    const StringId& str_id = GetStringId(type_id.descriptor_idx_);
     const char* str = GetStringData(str_id);
     int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
     if (compare > 0) {
@@ -340,7 +345,7 @@
   return nullptr;
 }
 
-const DexFile::TypeId* DexFile::FindTypeId(dex::StringIndex string_idx) const {
+const TypeId* DexFile::FindTypeId(dex::StringIndex string_idx) const {
   int32_t lo = 0;
   int32_t hi = NumTypeIds() - 1;
   while (hi >= lo) {
@@ -357,15 +362,15 @@
   return nullptr;
 }
 
-const DexFile::ProtoId* DexFile::FindProtoId(dex::TypeIndex return_type_idx,
-                                             const dex::TypeIndex* signature_type_idxs,
-                                             uint32_t signature_length) const {
+const ProtoId* DexFile::FindProtoId(dex::TypeIndex return_type_idx,
+                                    const dex::TypeIndex* signature_type_idxs,
+                                    uint32_t signature_length) const {
   int32_t lo = 0;
   int32_t hi = NumProtoIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
     const dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(mid);
-    const DexFile::ProtoId& proto = GetProtoId(proto_idx);
+    const ProtoId& proto = GetProtoId(proto_idx);
     int compare = return_type_idx.index_ - proto.return_type_idx_.index_;
     if (compare == 0) {
       DexFileParameterIterator it(*this, proto);
@@ -430,7 +435,7 @@
     }
     // TODO: avoid creating a std::string just to get a 0-terminated char array
     std::string descriptor(signature.data() + start_offset, offset - start_offset);
-    const DexFile::TypeId* type_id = FindTypeId(descriptor.c_str());
+    const TypeId* type_id = FindTypeId(descriptor.c_str());
     if (type_id == nullptr) {
       return false;
     }
@@ -465,7 +470,7 @@
   while (min < max) {
     const uint32_t mid = (min + max) / 2;
 
-    const art::DexFile::TryItem& ti = try_items[mid];
+    const TryItem& ti = try_items[mid];
     const uint32_t start = ti.start_addr_;
     const uint32_t end = start + ti.insn_count_;
 
@@ -481,22 +486,6 @@
   return -1;
 }
 
-bool DexFile::LineNumForPcCb(void* raw_context, const PositionInfo& entry) {
-  LineNumFromPcContext* context = reinterpret_cast<LineNumFromPcContext*>(raw_context);
-
-  // We know that this callback will be called in
-  // ascending address order, so keep going until we find
-  // a match or we've just gone past it.
-  if (entry.address_ > context->address_) {
-    // The line number from the previous positions callback
-    // wil be the final result.
-    return true;
-  } else {
-    context->line_num_ = entry.line_;
-    return entry.address_ == context->address_;
-  }
-}
-
 // Read a signed integer.  "zwidth" is the zero-based byte count.
 int32_t DexFile::ReadSignedInt(const uint8_t* ptr, int zwidth) {
   int32_t val = 0;
@@ -547,9 +536,9 @@
   if (method_idx >= NumMethodIds()) {
     return StringPrintf("<<invalid-method-idx-%d>>", method_idx);
   }
-  const DexFile::MethodId& method_id = GetMethodId(method_idx);
+  const MethodId& method_id = GetMethodId(method_idx);
   std::string result;
-  const DexFile::ProtoId* proto_id = with_signature ? &GetProtoId(method_id.proto_idx_) : nullptr;
+  const ProtoId* proto_id = with_signature ? &GetProtoId(method_id.proto_idx_) : nullptr;
   if (with_signature) {
     AppendPrettyDescriptor(StringByTypeIdx(proto_id->return_type_idx_), &result);
     result += ' ';
@@ -559,7 +548,7 @@
   result += GetMethodName(method_id);
   if (with_signature) {
     result += '(';
-    const DexFile::TypeList* params = GetProtoParameters(*proto_id);
+    const TypeList* params = GetProtoParameters(*proto_id);
     if (params != nullptr) {
       const char* separator = "";
       for (uint32_t i = 0u, size = params->Size(); i != size; ++i) {
@@ -577,7 +566,7 @@
   if (field_idx >= NumFieldIds()) {
     return StringPrintf("<<invalid-field-idx-%d>>", field_idx);
   }
-  const DexFile::FieldId& field_id = GetFieldId(field_idx);
+  const FieldId& field_id = GetFieldId(field_idx);
   std::string result;
   if (with_type) {
     result += GetFieldTypeDescriptor(field_id);
@@ -593,12 +582,12 @@
   if (type_idx.index_ >= NumTypeIds()) {
     return StringPrintf("<<invalid-type-idx-%d>>", type_idx.index_);
   }
-  const DexFile::TypeId& type_id = GetTypeId(type_idx);
+  const TypeId& type_id = GetTypeId(type_idx);
   return PrettyDescriptor(GetTypeDescriptor(type_id));
 }
 
 dex::ProtoIndex DexFile::GetProtoIndexForCallSite(uint32_t call_site_idx) const {
-  const DexFile::CallSiteIdItem& csi = GetCallSiteId(call_site_idx);
+  const CallSiteIdItem& csi = GetCallSiteId(call_site_idx);
   CallSiteArrayValueIterator it(*this, csi);
   it.Next();
   it.Next();
@@ -616,66 +605,6 @@
   return os;
 }
 
-std::string Signature::ToString() const {
-  if (dex_file_ == nullptr) {
-    CHECK(proto_id_ == nullptr);
-    return "<no signature>";
-  }
-  const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
-  std::string result;
-  if (params == nullptr) {
-    result += "()";
-  } else {
-    result += "(";
-    for (uint32_t i = 0; i < params->Size(); ++i) {
-      result += dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_);
-    }
-    result += ")";
-  }
-  result += dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
-  return result;
-}
-
-uint32_t Signature::GetNumberOfParameters() const {
-  const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
-  return (params != nullptr) ? params->Size() : 0;
-}
-
-bool Signature::IsVoid() const {
-  const char* return_type = dex_file_->GetReturnTypeDescriptor(*proto_id_);
-  return strcmp(return_type, "V") == 0;
-}
-
-bool Signature::operator==(const StringPiece& rhs) const {
-  if (dex_file_ == nullptr) {
-    return false;
-  }
-  StringPiece tail(rhs);
-  if (!tail.starts_with("(")) {
-    return false;  // Invalid signature
-  }
-  tail.remove_prefix(1);  // "(";
-  const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
-  if (params != nullptr) {
-    for (uint32_t i = 0; i < params->Size(); ++i) {
-      StringPiece param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_));
-      if (!tail.starts_with(param)) {
-        return false;
-      }
-      tail.remove_prefix(param.length());
-    }
-  }
-  if (!tail.starts_with(")")) {
-    return false;
-  }
-  tail.remove_prefix(1);  // ")";
-  return tail == dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
-}
-
-std::ostream& operator<<(std::ostream& os, const Signature& sig) {
-  return os << sig.ToString();
-}
-
 EncodedArrayValueIterator::EncodedArrayValueIterator(const DexFile& dex_file,
                                                      const uint8_t* array_data)
     : dex_file_(dex_file),
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 98787d1..8ea3c09 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -24,14 +24,15 @@
 #include <android-base/logging.h>
 
 #include "base/globals.h"
-#include "base/iteration_range.h"
+#include "base/hiddenapi_domain.h"
 #include "base/macros.h"
 #include "base/value_object.h"
 #include "class_iterator.h"
+#include "dex_file_structs.h"
 #include "dex_file_types.h"
-#include "hidden_api_access_flags.h"
 #include "jni.h"
 #include "modifiers.h"
+#include "signature.h"
 
 namespace art {
 
@@ -39,9 +40,9 @@
 class CompactDexFile;
 class DexInstructionIterator;
 enum InvokeType : uint32_t;
+template <typename Iter> class IterationRange;
 class MemMap;
 class OatDexFile;
-class Signature;
 class StandardDexFile;
 class StringPiece;
 class ZipArchive;
@@ -92,7 +93,7 @@
     uint32_t endian_tag_ = 0;
     uint32_t link_size_ = 0;  // unused
     uint32_t link_off_ = 0;  // unused
-    uint32_t map_off_ = 0;  // unused
+    uint32_t map_off_ = 0;  // map list offset from data_off_
     uint32_t string_ids_size_ = 0;  // number of StringIds
     uint32_t string_ids_off_ = 0;  // file offset of StringIds array
     uint32_t type_ids_size_ = 0;  // number of TypeIds, we don't support more than 65535
@@ -134,148 +135,7 @@
     kDexTypeAnnotationItem           = 0x2004,
     kDexTypeEncodedArrayItem         = 0x2005,
     kDexTypeAnnotationsDirectoryItem = 0x2006,
-  };
-
-  struct MapItem {
-    uint16_t type_;
-    uint16_t unused_;
-    uint32_t size_;
-    uint32_t offset_;
-  };
-
-  struct MapList {
-    uint32_t size_;
-    MapItem list_[1];
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(MapList);
-  };
-
-  // Raw string_id_item.
-  struct StringId {
-    uint32_t string_data_off_;  // offset in bytes from the base address
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(StringId);
-  };
-
-  // Raw type_id_item.
-  struct TypeId {
-    dex::StringIndex descriptor_idx_;  // index into string_ids
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(TypeId);
-  };
-
-  // Raw field_id_item.
-  struct FieldId {
-    dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
-    dex::TypeIndex type_idx_;    // index into type_ids_ array for field type
-    dex::StringIndex name_idx_;  // index into string_ids_ array for field name
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(FieldId);
-  };
-
-  // Raw proto_id_item.
-  struct ProtoId {
-    dex::StringIndex shorty_idx_;     // index into string_ids array for shorty descriptor
-    dex::TypeIndex return_type_idx_;  // index into type_ids array for return type
-    uint16_t pad_;                    // padding = 0
-    uint32_t parameters_off_;         // file offset to type_list for parameter types
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ProtoId);
-  };
-
-  // Raw method_id_item.
-  struct MethodId {
-    dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
-    dex::ProtoIndex proto_idx_;  // index into proto_ids_ array for method prototype
-    dex::StringIndex name_idx_;  // index into string_ids_ array for method name
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(MethodId);
-  };
-
-  // Base code_item, compact dex and standard dex have different code item layouts.
-  struct CodeItem {
-   protected:
-    CodeItem() = default;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(CodeItem);
-  };
-
-  // Raw class_def_item.
-  struct ClassDef {
-    dex::TypeIndex class_idx_;  // index into type_ids_ array for this class
-    uint16_t pad1_;  // padding = 0
-    uint32_t access_flags_;
-    dex::TypeIndex superclass_idx_;  // index into type_ids_ array for superclass
-    uint16_t pad2_;  // padding = 0
-    uint32_t interfaces_off_;  // file offset to TypeList
-    dex::StringIndex source_file_idx_;  // index into string_ids_ for source file name
-    uint32_t annotations_off_;  // file offset to annotations_directory_item
-    uint32_t class_data_off_;  // file offset to class_data_item
-    uint32_t static_values_off_;  // file offset to EncodedArray
-
-    // Returns the valid access flags, that is, Java modifier bits relevant to the ClassDef type
-    // (class or interface). These are all in the lower 16b and do not contain runtime flags.
-    uint32_t GetJavaAccessFlags() const {
-      // Make sure that none of our runtime-only flags are set.
-      static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
-                    "Valid class flags not a subset of Java flags");
-      static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
-                    "Valid interface flags not a subset of Java flags");
-
-      if ((access_flags_ & kAccInterface) != 0) {
-        // Interface.
-        return access_flags_ & kAccValidInterfaceFlags;
-      } else {
-        // Class.
-        return access_flags_ & kAccValidClassFlags;
-      }
-    }
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ClassDef);
-  };
-
-  // Raw type_item.
-  struct TypeItem {
-    dex::TypeIndex type_idx_;  // index into type_ids section
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(TypeItem);
-  };
-
-  // Raw type_list.
-  class TypeList {
-   public:
-    uint32_t Size() const {
-      return size_;
-    }
-
-    const TypeItem& GetTypeItem(uint32_t idx) const {
-      DCHECK_LT(idx, this->size_);
-      return this->list_[idx];
-    }
-
-    // Size in bytes of the part of the list that is common.
-    static constexpr size_t GetHeaderSize() {
-      return 4U;
-    }
-
-    // Size in bytes of the whole type list including all the stored elements.
-    static constexpr size_t GetListSize(size_t count) {
-      return GetHeaderSize() + sizeof(TypeItem) * count;
-    }
-
-   private:
-    uint32_t size_;  // size of the list, in entries
-    TypeItem list_[1];  // elements of the list
-    DISALLOW_COPY_AND_ASSIGN(TypeList);
+    kDexTypeHiddenapiClassData       = 0xF000,
   };
 
   // MethodHandle Types
@@ -294,37 +154,6 @@
     kLast = kInvokeInterface
   };
 
-  // raw method_handle_item
-  struct MethodHandleItem {
-    uint16_t method_handle_type_;
-    uint16_t reserved1_;            // Reserved for future use.
-    uint16_t field_or_method_idx_;  // Field index for accessors, method index otherwise.
-    uint16_t reserved2_;            // Reserved for future use.
-   private:
-    DISALLOW_COPY_AND_ASSIGN(MethodHandleItem);
-  };
-
-  // raw call_site_id_item
-  struct CallSiteIdItem {
-    uint32_t data_off_;  // Offset into data section pointing to encoded array items.
-   private:
-    DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
-  };
-
-  // Raw try_item.
-  struct TryItem {
-    static constexpr size_t kAlignment = sizeof(uint32_t);
-
-    uint32_t start_addr_;
-    uint16_t insn_count_;
-    uint16_t handler_off_;
-
-   private:
-    TryItem() = default;
-    friend class DexWriter;
-    DISALLOW_COPY_AND_ASSIGN(TryItem);
-  };
-
   // Annotation constants.
   enum {
     kDexVisibilityBuild         = 0x00,     /* annotation visibility */
@@ -354,71 +183,6 @@
     kDexAnnotationValueArgShift = 5,
   };
 
-  struct AnnotationsDirectoryItem {
-    uint32_t class_annotations_off_;
-    uint32_t fields_size_;
-    uint32_t methods_size_;
-    uint32_t parameters_size_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
-  };
-
-  struct FieldAnnotationsItem {
-    uint32_t field_idx_;
-    uint32_t annotations_off_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(FieldAnnotationsItem);
-  };
-
-  struct MethodAnnotationsItem {
-    uint32_t method_idx_;
-    uint32_t annotations_off_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(MethodAnnotationsItem);
-  };
-
-  struct ParameterAnnotationsItem {
-    uint32_t method_idx_;
-    uint32_t annotations_off_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ParameterAnnotationsItem);
-  };
-
-  struct AnnotationSetRefItem {
-    uint32_t annotations_off_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefItem);
-  };
-
-  struct AnnotationSetRefList {
-    uint32_t size_;
-    AnnotationSetRefItem list_[1];
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList);
-  };
-
-  struct AnnotationSetItem {
-    uint32_t size_;
-    uint32_t entries_[1];
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
-  };
-
-  struct AnnotationItem {
-    uint8_t visibility_;
-    uint8_t annotation_[1];
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
-  };
-
   enum AnnotationResultStyle {  // private
     kAllObjects,
     kPrimitivesOrObjects,
@@ -473,25 +237,26 @@
   }
 
   // Returns the StringId at the specified index.
-  const StringId& GetStringId(dex::StringIndex idx) const {
+  const dex::StringId& GetStringId(dex::StringIndex idx) const {
     DCHECK_LT(idx.index_, NumStringIds()) << GetLocation();
     return string_ids_[idx.index_];
   }
 
-  dex::StringIndex GetIndexForStringId(const StringId& string_id) const {
+  dex::StringIndex GetIndexForStringId(const dex::StringId& string_id) const {
     CHECK_GE(&string_id, string_ids_) << GetLocation();
     CHECK_LT(&string_id, string_ids_ + header_->string_ids_size_) << GetLocation();
     return dex::StringIndex(&string_id - string_ids_);
   }
 
-  int32_t GetStringLength(const StringId& string_id) const;
+  int32_t GetStringLength(const dex::StringId& string_id) const;
 
   // Returns a pointer to the UTF-8 string data referred to by the given string_id as well as the
   // length of the string when decoded as a UTF-16 string. Note the UTF-16 length is not the same
   // as the string length of the string data.
-  const char* GetStringDataAndUtf16Length(const StringId& string_id, uint32_t* utf16_length) const;
+  const char* GetStringDataAndUtf16Length(const dex::StringId& string_id,
+                                          uint32_t* utf16_length) const;
 
-  const char* GetStringData(const StringId& string_id) const;
+  const char* GetStringData(const dex::StringId& string_id) const;
 
   // Index version of GetStringDataAndUtf16Length.
   const char* StringDataAndUtf16LengthByIdx(dex::StringIndex idx, uint32_t* utf16_length) const;
@@ -499,9 +264,9 @@
   const char* StringDataByIdx(dex::StringIndex idx) const;
 
   // Looks up a string id for a given modified utf8 string.
-  const StringId* FindStringId(const char* string) const;
+  const dex::StringId* FindStringId(const char* string) const;
 
-  const TypeId* FindTypeId(const char* string) const;
+  const dex::TypeId* FindTypeId(const char* string) const;
 
   // Returns the number of type identifiers in the .dex file.
   uint32_t NumTypeIds() const {
@@ -514,12 +279,12 @@
   }
 
   // Returns the TypeId at the specified index.
-  const TypeId& GetTypeId(dex::TypeIndex idx) const {
+  const dex::TypeId& GetTypeId(dex::TypeIndex idx) const {
     DCHECK_LT(idx.index_, NumTypeIds()) << GetLocation();
     return type_ids_[idx.index_];
   }
 
-  dex::TypeIndex GetIndexForTypeId(const TypeId& type_id) const {
+  dex::TypeIndex GetIndexForTypeId(const dex::TypeId& type_id) const {
     CHECK_GE(&type_id, type_ids_) << GetLocation();
     CHECK_LT(&type_id, type_ids_ + header_->type_ids_size_) << GetLocation();
     size_t result = &type_id - type_ids_;
@@ -533,10 +298,10 @@
   const char* StringByTypeIdx(dex::TypeIndex idx) const;
 
   // Returns the type descriptor string of a type id.
-  const char* GetTypeDescriptor(const TypeId& type_id) const;
+  const char* GetTypeDescriptor(const dex::TypeId& type_id) const;
 
   // Looks up a type for the given string index
-  const TypeId* FindTypeId(dex::StringIndex string_idx) const;
+  const dex::TypeId* FindTypeId(dex::StringIndex string_idx) const;
 
   // Returns the number of field identifiers in the .dex file.
   size_t NumFieldIds() const {
@@ -545,38 +310,38 @@
   }
 
   // Returns the FieldId at the specified index.
-  const FieldId& GetFieldId(uint32_t idx) const {
+  const dex::FieldId& GetFieldId(uint32_t idx) const {
     DCHECK_LT(idx, NumFieldIds()) << GetLocation();
     return field_ids_[idx];
   }
 
-  uint32_t GetIndexForFieldId(const FieldId& field_id) const {
+  uint32_t GetIndexForFieldId(const dex::FieldId& field_id) const {
     CHECK_GE(&field_id, field_ids_) << GetLocation();
     CHECK_LT(&field_id, field_ids_ + header_->field_ids_size_) << GetLocation();
     return &field_id - field_ids_;
   }
 
   // Looks up a field by its declaring class, name and type
-  const FieldId* FindFieldId(const DexFile::TypeId& declaring_klass,
-                             const DexFile::StringId& name,
-                             const DexFile::TypeId& type) const;
+  const dex::FieldId* FindFieldId(const dex::TypeId& declaring_klass,
+                                  const dex::StringId& name,
+                                  const dex::TypeId& type) const;
 
-  uint32_t FindCodeItemOffset(const DexFile::ClassDef& class_def,
+  uint32_t FindCodeItemOffset(const dex::ClassDef& class_def,
                               uint32_t dex_method_idx) const;
 
-  virtual uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) const = 0;
+  virtual uint32_t GetCodeItemSize(const dex::CodeItem& disk_code_item) const = 0;
 
   // Returns the declaring class descriptor string of a field id.
-  const char* GetFieldDeclaringClassDescriptor(const FieldId& field_id) const {
-    const DexFile::TypeId& type_id = GetTypeId(field_id.class_idx_);
+  const char* GetFieldDeclaringClassDescriptor(const dex::FieldId& field_id) const {
+    const dex::TypeId& type_id = GetTypeId(field_id.class_idx_);
     return GetTypeDescriptor(type_id);
   }
 
   // Returns the class descriptor string of a field id.
-  const char* GetFieldTypeDescriptor(const FieldId& field_id) const;
+  const char* GetFieldTypeDescriptor(const dex::FieldId& field_id) const;
 
   // Returns the name of a field id.
-  const char* GetFieldName(const FieldId& field_id) const;
+  const char* GetFieldName(const dex::FieldId& field_id) const;
 
   // Returns the number of method identifiers in the .dex file.
   size_t NumMethodIds() const {
@@ -585,45 +350,47 @@
   }
 
   // Returns the MethodId at the specified index.
-  const MethodId& GetMethodId(uint32_t idx) const {
+  const dex::MethodId& GetMethodId(uint32_t idx) const {
     DCHECK_LT(idx, NumMethodIds()) << GetLocation();
     return method_ids_[idx];
   }
 
-  uint32_t GetIndexForMethodId(const MethodId& method_id) const {
+  uint32_t GetIndexForMethodId(const dex::MethodId& method_id) const {
     CHECK_GE(&method_id, method_ids_) << GetLocation();
     CHECK_LT(&method_id, method_ids_ + header_->method_ids_size_) << GetLocation();
     return &method_id - method_ids_;
   }
 
   // Looks up a method by its declaring class, name and proto_id
-  const MethodId* FindMethodId(const DexFile::TypeId& declaring_klass,
-                               const DexFile::StringId& name,
-                               const DexFile::ProtoId& signature) const;
+  const dex::MethodId* FindMethodId(const dex::TypeId& declaring_klass,
+                                    const dex::StringId& name,
+                                    const dex::ProtoId& signature) const;
 
   // Returns the declaring class descriptor string of a method id.
-  const char* GetMethodDeclaringClassDescriptor(const MethodId& method_id) const;
+  const char* GetMethodDeclaringClassDescriptor(const dex::MethodId& method_id) const;
 
   // Returns the prototype of a method id.
-  const ProtoId& GetMethodPrototype(const MethodId& method_id) const {
+  const dex::ProtoId& GetMethodPrototype(const dex::MethodId& method_id) const {
     return GetProtoId(method_id.proto_idx_);
   }
 
   // Returns a representation of the signature of a method id.
-  const Signature GetMethodSignature(const MethodId& method_id) const;
+  const Signature GetMethodSignature(const dex::MethodId& method_id) const;
 
   // Returns a representation of the signature of a proto id.
-  const Signature GetProtoSignature(const ProtoId& proto_id) const;
+  const Signature GetProtoSignature(const dex::ProtoId& proto_id) const;
 
   // Returns the name of a method id.
-  const char* GetMethodName(const MethodId& method_id) const;
+  const char* GetMethodName(const dex::MethodId& method_id) const;
+  const char* GetMethodName(const dex::MethodId& method_id, uint32_t* utf_length) const;
+  const char* GetMethodName(uint32_t idx, uint32_t* utf_length) const;
 
   // Returns the shorty of a method by its index.
   const char* GetMethodShorty(uint32_t idx) const;
 
   // Returns the shorty of a method id.
-  const char* GetMethodShorty(const MethodId& method_id) const;
-  const char* GetMethodShorty(const MethodId& method_id, uint32_t* length) const;
+  const char* GetMethodShorty(const dex::MethodId& method_id) const;
+  const char* GetMethodShorty(const dex::MethodId& method_id, uint32_t* length) const;
 
   // Returns the number of class definitions in the .dex file.
   uint32_t NumClassDefs() const {
@@ -632,32 +399,32 @@
   }
 
   // Returns the ClassDef at the specified index.
-  const ClassDef& GetClassDef(uint16_t idx) const {
+  const dex::ClassDef& GetClassDef(uint16_t idx) const {
     DCHECK_LT(idx, NumClassDefs()) << GetLocation();
     return class_defs_[idx];
   }
 
-  uint16_t GetIndexForClassDef(const ClassDef& class_def) const {
+  uint16_t GetIndexForClassDef(const dex::ClassDef& class_def) const {
     CHECK_GE(&class_def, class_defs_) << GetLocation();
     CHECK_LT(&class_def, class_defs_ + header_->class_defs_size_) << GetLocation();
     return &class_def - class_defs_;
   }
 
   // Returns the class descriptor string of a class definition.
-  const char* GetClassDescriptor(const ClassDef& class_def) const;
+  const char* GetClassDescriptor(const dex::ClassDef& class_def) const;
 
   // Looks up a class definition by its type index.
-  const ClassDef* FindClassDef(dex::TypeIndex type_idx) const;
+  const dex::ClassDef* FindClassDef(dex::TypeIndex type_idx) const;
 
-  const TypeList* GetInterfacesList(const ClassDef& class_def) const {
-    return DataPointer<TypeList>(class_def.interfaces_off_);
+  const dex::TypeList* GetInterfacesList(const dex::ClassDef& class_def) const {
+    return DataPointer<dex::TypeList>(class_def.interfaces_off_);
   }
 
   uint32_t NumMethodHandles() const {
     return num_method_handles_;
   }
 
-  const MethodHandleItem& GetMethodHandle(uint32_t idx) const {
+  const dex::MethodHandleItem& GetMethodHandle(uint32_t idx) const {
     CHECK_LT(idx, NumMethodHandles());
     return method_handles_[idx];
   }
@@ -666,23 +433,23 @@
     return num_call_site_ids_;
   }
 
-  const CallSiteIdItem& GetCallSiteId(uint32_t idx) const {
+  const dex::CallSiteIdItem& GetCallSiteId(uint32_t idx) const {
     CHECK_LT(idx, NumCallSiteIds());
     return call_site_ids_[idx];
   }
 
   // Returns a pointer to the raw memory mapped class_data_item
-  const uint8_t* GetClassData(const ClassDef& class_def) const {
+  const uint8_t* GetClassData(const dex::ClassDef& class_def) const {
     return DataPointer<uint8_t>(class_def.class_data_off_);
   }
 
   // Return the code item for a provided offset.
-  const CodeItem* GetCodeItem(const uint32_t code_off) const {
+  const dex::CodeItem* GetCodeItem(const uint32_t code_off) const {
     // May be null for native or abstract methods.
-    return DataPointer<CodeItem>(code_off);
+    return DataPointer<dex::CodeItem>(code_off);
   }
 
-  const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
+  const char* GetReturnTypeDescriptor(const dex::ProtoId& proto_id) const;
 
   // Returns the number of prototype identifiers in the .dex file.
   size_t NumProtoIds() const {
@@ -691,23 +458,23 @@
   }
 
   // Returns the ProtoId at the specified index.
-  const ProtoId& GetProtoId(dex::ProtoIndex idx) const {
+  const dex::ProtoId& GetProtoId(dex::ProtoIndex idx) const {
     DCHECK_LT(idx.index_, NumProtoIds()) << GetLocation();
     return proto_ids_[idx.index_];
   }
 
-  dex::ProtoIndex GetIndexForProtoId(const ProtoId& proto_id) const {
+  dex::ProtoIndex GetIndexForProtoId(const dex::ProtoId& proto_id) const {
     CHECK_GE(&proto_id, proto_ids_) << GetLocation();
     CHECK_LT(&proto_id, proto_ids_ + header_->proto_ids_size_) << GetLocation();
     return dex::ProtoIndex(&proto_id - proto_ids_);
   }
 
   // Looks up a proto id for a given return type and signature type list
-  const ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
-                             const dex::TypeIndex* signature_type_idxs,
+  const dex::ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
+                                  const dex::TypeIndex* signature_type_idxs,
                              uint32_t signature_length) const;
-  const ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
-                             const std::vector<dex::TypeIndex>& signature_type_idxs) const {
+  const dex::ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
+                                  const std::vector<dex::TypeIndex>& signature_type_idxs) const {
     return FindProtoId(return_type_idx, &signature_type_idxs[0], signature_type_idxs.size());
   }
 
@@ -723,21 +490,22 @@
   // Returns the short form method descriptor for the given prototype.
   const char* GetShorty(dex::ProtoIndex proto_idx) const;
 
-  const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
-    return DataPointer<TypeList>(proto_id.parameters_off_);
+  const dex::TypeList* GetProtoParameters(const dex::ProtoId& proto_id) const {
+    return DataPointer<dex::TypeList>(proto_id.parameters_off_);
   }
 
-  const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
+  const uint8_t* GetEncodedStaticFieldValuesArray(const dex::ClassDef& class_def) const {
     return DataPointer<uint8_t>(class_def.static_values_off_);
   }
 
-  const uint8_t* GetCallSiteEncodedValuesArray(const CallSiteIdItem& call_site_id) const {
+  const uint8_t* GetCallSiteEncodedValuesArray(const dex::CallSiteIdItem& call_site_id) const {
     return DataBegin() + call_site_id.data_off_;
   }
 
   dex::ProtoIndex GetProtoIndexForCallSite(uint32_t call_site_idx) const;
 
-  static const TryItem* GetTryItems(const DexInstructionIterator& code_item_end, uint32_t offset);
+  static const dex::TryItem* GetTryItems(const DexInstructionIterator& code_item_end,
+                                         uint32_t offset);
 
   // Get the base of the encoded data for the given DexCode.
   static const uint8_t* GetCatchHandlerData(const DexInstructionIterator& code_item_end,
@@ -745,7 +513,7 @@
                                             uint32_t offset);
 
   // Find which try region is associated with the given address (ie dex pc). Returns -1 if none.
-  static int32_t FindTryItem(const TryItem* try_items, uint32_t tries_size, uint32_t address);
+  static int32_t FindTryItem(const dex::TryItem* try_items, uint32_t tries_size, uint32_t address);
 
   // Get the pointer to the start of the debugging data
   const uint8_t* GetDebugInfoStream(uint32_t debug_info_off) const {
@@ -782,70 +550,87 @@
   // Callback for "new locals table entry".
   typedef void (*DexDebugNewLocalCb)(void* context, const LocalInfo& entry);
 
-  static bool LineNumForPcCb(void* context, const PositionInfo& entry);
-
-  const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
-    return DataPointer<AnnotationsDirectoryItem>(class_def.annotations_off_);
+  const dex::AnnotationsDirectoryItem* GetAnnotationsDirectory(const dex::ClassDef& class_def)
+      const {
+    return DataPointer<dex::AnnotationsDirectoryItem>(class_def.annotations_off_);
   }
 
-  const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const {
-    return DataPointer<AnnotationSetItem>(anno_dir->class_annotations_off_);
+  const dex::AnnotationSetItem* GetClassAnnotationSet(const dex::AnnotationsDirectoryItem* anno_dir)
+      const {
+    return DataPointer<dex::AnnotationSetItem>(anno_dir->class_annotations_off_);
   }
 
-  const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const {
+  const dex::FieldAnnotationsItem* GetFieldAnnotations(
+      const dex::AnnotationsDirectoryItem* anno_dir) const {
     return (anno_dir->fields_size_ == 0)
          ? nullptr
-         : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
+         : reinterpret_cast<const dex::FieldAnnotationsItem*>(&anno_dir[1]);
   }
 
-  const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir)
-      const {
+  const dex::MethodAnnotationsItem* GetMethodAnnotations(
+      const dex::AnnotationsDirectoryItem* anno_dir) const {
     if (anno_dir->methods_size_ == 0) {
       return nullptr;
     }
     // Skip past the header and field annotations.
     const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
-    addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
-    return reinterpret_cast<const MethodAnnotationsItem*>(addr);
+    addr += anno_dir->fields_size_ * sizeof(dex::FieldAnnotationsItem);
+    return reinterpret_cast<const dex::MethodAnnotationsItem*>(addr);
   }
 
-  const ParameterAnnotationsItem* GetParameterAnnotations(const AnnotationsDirectoryItem* anno_dir)
-      const {
+  const dex::ParameterAnnotationsItem* GetParameterAnnotations(
+      const dex::AnnotationsDirectoryItem* anno_dir) const {
     if (anno_dir->parameters_size_ == 0) {
       return nullptr;
     }
     // Skip past the header, field annotations, and method annotations.
     const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
-    addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
-    addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem);
-    return reinterpret_cast<const ParameterAnnotationsItem*>(addr);
+    addr += anno_dir->fields_size_ * sizeof(dex::FieldAnnotationsItem);
+    addr += anno_dir->methods_size_ * sizeof(dex::MethodAnnotationsItem);
+    return reinterpret_cast<const dex::ParameterAnnotationsItem*>(addr);
   }
 
-  const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const {
-    return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
+  const dex::AnnotationSetItem* GetFieldAnnotationSetItem(
+      const dex::FieldAnnotationsItem& anno_item) const {
+    return DataPointer<dex::AnnotationSetItem>(anno_item.annotations_off_);
   }
 
-  const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item)
+  const dex::AnnotationSetItem* GetMethodAnnotationSetItem(
+      const dex::MethodAnnotationsItem& anno_item) const {
+    return DataPointer<dex::AnnotationSetItem>(anno_item.annotations_off_);
+  }
+
+  const dex::AnnotationSetRefList* GetParameterAnnotationSetRefList(
+      const dex::ParameterAnnotationsItem* anno_item) const {
+    return DataPointer<dex::AnnotationSetRefList>(anno_item->annotations_off_);
+  }
+
+  ALWAYS_INLINE const dex::AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
+    return DataPointer<dex::AnnotationItem>(offset);
+  }
+
+  ALWAYS_INLINE const dex::HiddenapiClassData* GetHiddenapiClassDataAtOffset(uint32_t offset)
       const {
-    return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
+    return DataPointer<dex::HiddenapiClassData>(offset);
   }
 
-  const AnnotationSetRefList* GetParameterAnnotationSetRefList(
-      const ParameterAnnotationsItem* anno_item) const {
-    return DataPointer<AnnotationSetRefList>(anno_item->annotations_off_);
+  ALWAYS_INLINE const dex::HiddenapiClassData* GetHiddenapiClassData() const {
+    return hiddenapi_class_data_;
   }
 
-  ALWAYS_INLINE const AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
-    return DataPointer<AnnotationItem>(offset);
+  ALWAYS_INLINE bool HasHiddenapiClassData() const {
+    return hiddenapi_class_data_ != nullptr;
   }
 
-  const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
+  const dex::AnnotationItem* GetAnnotationItem(const dex::AnnotationSetItem* set_item,
+                                               uint32_t index) const {
     DCHECK_LE(index, set_item->size_);
     return GetAnnotationItemAtOffset(set_item->entries_[index]);
   }
 
-  const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
-    return DataPointer<AnnotationSetItem>(anno_item->annotations_off_);
+  const dex::AnnotationSetItem* GetSetRefItemItem(const dex::AnnotationSetRefItem* anno_item)
+      const {
+    return DataPointer<dex::AnnotationSetItem>(anno_item->annotations_off_);
   }
 
   // Debug info opcodes and constants
@@ -865,15 +650,6 @@
     DBG_LINE_RANGE           = 15,
   };
 
-  struct LineNumFromPcContext {
-    LineNumFromPcContext(uint32_t address, uint32_t line_num)
-        : address_(address), line_num_(line_num) {}
-    uint32_t address_;
-    uint32_t line_num_;
-   private:
-    DISALLOW_COPY_AND_ASSIGN(LineNumFromPcContext);
-  };
-
   // Returns false if there is no debugging information or if it cannot be decoded.
   template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
   static bool DecodeDebugLocalInfo(const uint8_t* stream,
@@ -885,10 +661,9 @@
                                    uint16_t registers_size,
                                    uint16_t ins_size,
                                    uint16_t insns_size_in_code_units,
-                                   IndexToStringData index_to_string_data,
-                                   TypeIndexToStringData type_index_to_string_data,
-                                   NewLocalCallback new_local,
-                                   void* context);
+                                   const IndexToStringData& index_to_string_data,
+                                   const TypeIndexToStringData& type_index_to_string_data,
+                                   const NewLocalCallback& new_local) NO_THREAD_SAFETY_ANALYSIS;
   template<typename NewLocalCallback>
   bool DecodeDebugLocalInfo(uint32_t registers_size,
                             uint32_t ins_size,
@@ -896,21 +671,15 @@
                             uint32_t debug_info_offset,
                             bool is_static,
                             uint32_t method_idx,
-                            NewLocalCallback new_local,
-                            void* context) const;
+                            const NewLocalCallback& new_local) const;
 
   // Returns false if there is no debugging information or if it cannot be decoded.
   template<typename DexDebugNewPosition, typename IndexToStringData>
   static bool DecodeDebugPositionInfo(const uint8_t* stream,
-                                      IndexToStringData index_to_string_data,
-                                      DexDebugNewPosition position_functor,
-                                      void* context);
-  template<typename DexDebugNewPosition>
-  bool DecodeDebugPositionInfo(uint32_t debug_info_offset,
-                               DexDebugNewPosition position_functor,
-                               void* context) const;
+                                      const IndexToStringData& index_to_string_data,
+                                      const DexDebugNewPosition& position_functor);
 
-  const char* GetSourceFile(const ClassDef& class_def) const {
+  const char* GetSourceFile(const dex::ClassDef& class_def) const {
     if (!class_def.source_file_idx_.IsValid()) {
       return nullptr;
     } else {
@@ -958,8 +727,8 @@
   }
 
   // Read MapItems and validate/set remaining offsets.
-  const DexFile::MapList* GetMapList() const {
-    return reinterpret_cast<const DexFile::MapList*>(DataBegin() + header_->map_off_);
+  const dex::MapList* GetMapList() const {
+    return reinterpret_cast<const dex::MapList*>(DataBegin() + header_->map_off_);
   }
 
   // Utility methods for reading integral values from a buffer.
@@ -990,13 +759,8 @@
   ALWAYS_INLINE const StandardDexFile* AsStandardDexFile() const;
   ALWAYS_INLINE const CompactDexFile* AsCompactDexFile() const;
 
-  ALWAYS_INLINE bool IsPlatformDexFile() const {
-    return is_platform_dex_;
-  }
-
-  ALWAYS_INLINE void SetIsPlatformDexFile() {
-    is_platform_dex_ = true;
-  }
+  hiddenapi::Domain GetHiddenapiDomain() const { return hiddenapi_domain_; }
+  void SetHiddenapiDomain(hiddenapi::Domain value) { hiddenapi_domain_ = value; }
 
   bool IsInMainSection(const void* addr) const {
     return Begin() <= addr && addr < Begin() + Size();
@@ -1010,10 +774,14 @@
     return container_.get();
   }
 
-  // Changes the dex class data pointed to by data_ptr it to not have any hiddenapi flags.
-  static void UnHideAccessFlags(uint8_t* data_ptr, uint32_t new_access_flags, bool is_method);
+  IterationRange<ClassIterator> GetClasses() const;
 
-  inline IterationRange<ClassIterator> GetClasses() const;
+  template <typename Visitor>
+  static uint32_t DecodeDebugInfoParameterNames(const uint8_t** debug_info,
+                                                const Visitor& visitor);
+
+  static inline bool StringEquals(const DexFile* df1, dex::StringIndex sidx1,
+                                  const DexFile* df2, dex::StringIndex sidx2);
 
  protected:
   // First Dex format version supporting default methods.
@@ -1062,35 +830,39 @@
   const Header* const header_;
 
   // Points to the base of the string identifier list.
-  const StringId* const string_ids_;
+  const dex::StringId* const string_ids_;
 
   // Points to the base of the type identifier list.
-  const TypeId* const type_ids_;
+  const dex::TypeId* const type_ids_;
 
   // Points to the base of the field identifier list.
-  const FieldId* const field_ids_;
+  const dex::FieldId* const field_ids_;
 
   // Points to the base of the method identifier list.
-  const MethodId* const method_ids_;
+  const dex::MethodId* const method_ids_;
 
   // Points to the base of the prototype identifier list.
-  const ProtoId* const proto_ids_;
+  const dex::ProtoId* const proto_ids_;
 
   // Points to the base of the class definition list.
-  const ClassDef* const class_defs_;
+  const dex::ClassDef* const class_defs_;
 
   // Points to the base of the method handles list.
-  const MethodHandleItem* method_handles_;
+  const dex::MethodHandleItem* method_handles_;
 
   // Number of elements in the method handles list.
   size_t num_method_handles_;
 
   // Points to the base of the call sites id list.
-  const CallSiteIdItem* call_site_ids_;
+  const dex::CallSiteIdItem* call_site_ids_;
 
   // Number of elements in the call sites list.
   size_t num_call_site_ids_;
 
+  // Points to the base of the hiddenapi class data item, or nullptr if the dex
+  // file does not have one.
+  const dex::HiddenapiClassData* hiddenapi_class_data_;
+
   // If this dex file was loaded from an oat file, oat_dex_file_ contains a
   // pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
   // null.
@@ -1102,8 +874,7 @@
   // If the dex file is a compact dex file. If false then the dex file is a standard dex file.
   const bool is_compact_dex_;
 
-  // If the dex file is located in /system/framework/.
-  bool is_platform_dex_;
+  hiddenapi::Domain hiddenapi_domain_;
 
   friend class DexFileLoader;
   friend class DexFileVerifierTest;
@@ -1115,7 +886,7 @@
 // Iterate over a dex file's ProtoId's paramters
 class DexFileParameterIterator {
  public:
-  DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
+  DexFileParameterIterator(const DexFile& dex_file, const dex::ProtoId& proto_id)
       : dex_file_(dex_file) {
     type_list_ = dex_file_.GetProtoParameters(proto_id);
     if (type_list_ != nullptr) {
@@ -1133,44 +904,12 @@
   }
  private:
   const DexFile& dex_file_;
-  const DexFile::TypeList* type_list_ = nullptr;
+  const dex::TypeList* type_list_ = nullptr;
   uint32_t size_ = 0;
   uint32_t pos_ = 0;
   DISALLOW_IMPLICIT_CONSTRUCTORS(DexFileParameterIterator);
 };
 
-// Abstract the signature of a method.
-class Signature : public ValueObject {
- public:
-  std::string ToString() const;
-
-  static Signature NoSignature() {
-    return Signature();
-  }
-
-  bool IsVoid() const;
-  uint32_t GetNumberOfParameters() const;
-
-  bool operator==(const Signature& rhs) const;
-  bool operator!=(const Signature& rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator==(const StringPiece& rhs) const;
-
- private:
-  Signature(const DexFile* dex, const DexFile::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
-  }
-
-  Signature() = default;
-
-  friend class DexFile;
-
-  const DexFile* const dex_file_ = nullptr;
-  const DexFile::ProtoId* const proto_id_ = nullptr;
-};
-std::ostream& operator<<(std::ostream& os, const Signature& sig);
-
 class EncodedArrayValueIterator {
  public:
   EncodedArrayValueIterator(const DexFile& dex_file, const uint8_t* array_data);
@@ -1222,7 +961,7 @@
 class EncodedStaticFieldValueIterator : public EncodedArrayValueIterator {
  public:
   EncodedStaticFieldValueIterator(const DexFile& dex_file,
-                                  const DexFile::ClassDef& class_def)
+                                  const dex::ClassDef& class_def)
       : EncodedArrayValueIterator(dex_file,
                                   dex_file.GetEncodedStaticFieldValuesArray(class_def))
   {}
@@ -1235,7 +974,7 @@
 class CallSiteArrayValueIterator : public EncodedArrayValueIterator {
  public:
   CallSiteArrayValueIterator(const DexFile& dex_file,
-                             const DexFile::CallSiteIdItem& call_site_id)
+                             const dex::CallSiteIdItem& call_site_id)
       : EncodedArrayValueIterator(dex_file,
                                   dex_file.GetCallSiteEncodedValuesArray(call_site_id))
   {}
diff --git a/libdexfile/dex/dex_file_exception_helpers.cc b/libdexfile/dex/dex_file_exception_helpers.cc
index 8e597fd..72b2554 100644
--- a/libdexfile/dex/dex_file_exception_helpers.cc
+++ b/libdexfile/dex/dex_file_exception_helpers.cc
@@ -17,6 +17,7 @@
 #include "dex_file_exception_helpers.h"
 
 #include "code_item_accessors-inl.h"
+#include "dex_file_structs.h"
 
 namespace art {
 
@@ -29,7 +30,7 @@
     case 0:
       break;
     case 1: {
-      const DexFile::TryItem* tries = accessor.TryItems().begin();
+      const dex::TryItem* tries = accessor.TryItems().begin();
       uint32_t start = tries->start_addr_;
       if (address >= start) {
         uint32_t end = start + tries->insn_count_;
@@ -40,7 +41,7 @@
       break;
     }
     default: {
-      const DexFile::TryItem* try_item = accessor.FindTryItem(address);
+      const dex::TryItem* try_item = accessor.FindTryItem(address);
       offset = try_item != nullptr ? try_item->handler_off_ : -1;
       break;
     }
@@ -49,7 +50,7 @@
 }
 
 CatchHandlerIterator::CatchHandlerIterator(const CodeItemDataAccessor& accessor,
-                                           const DexFile::TryItem& try_item) {
+                                           const dex::TryItem& try_item) {
   handler_.address_ = -1;
   Init(accessor, try_item.handler_off_);
 }
diff --git a/libdexfile/dex/dex_file_exception_helpers.h b/libdexfile/dex/dex_file_exception_helpers.h
index a05fd68..08127c8 100644
--- a/libdexfile/dex/dex_file_exception_helpers.h
+++ b/libdexfile/dex/dex_file_exception_helpers.h
@@ -17,17 +17,23 @@
 #ifndef ART_LIBDEXFILE_DEX_DEX_FILE_EXCEPTION_HELPERS_H_
 #define ART_LIBDEXFILE_DEX_DEX_FILE_EXCEPTION_HELPERS_H_
 
-#include "dex_file.h"
+#include <android-base/logging.h>
+
+#include "dex_file_types.h"
 
 namespace art {
 
+namespace dex {
+struct TryItem;
+}  // namespace dex
+
 class CodeItemDataAccessor;
 
 class CatchHandlerIterator {
  public:
   CatchHandlerIterator(const CodeItemDataAccessor& accessor, uint32_t address);
 
-  CatchHandlerIterator(const CodeItemDataAccessor& accessor, const DexFile::TryItem& try_item);
+  CatchHandlerIterator(const CodeItemDataAccessor& accessor, const dex::TryItem& try_item);
 
   explicit CatchHandlerIterator(const uint8_t* handler_data) {
     Init(handler_data);
diff --git a/libdexfile/dex/dex_file_layout.cc b/libdexfile/dex/dex_file_layout.cc
index 1e36e05..929025a 100644
--- a/libdexfile/dex/dex_file_layout.cc
+++ b/libdexfile/dex/dex_file_layout.cc
@@ -16,8 +16,9 @@
 
 #include "dex_file_layout.h"
 
-#include <sys/mman.h>
 
+#include "base/bit_utils.h"
+#include "base/mman.h"
 #include "dex_file.h"
 
 namespace art {
@@ -25,6 +26,12 @@
 int DexLayoutSection::MadviseLargestPageAlignedRegion(const uint8_t* begin,
                                                       const uint8_t* end,
                                                       int advice) {
+#ifdef _WIN32
+  UNUSED(begin);
+  UNUSED(end);
+  UNUSED(advice);
+  PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
   DCHECK_LE(begin, end);
   begin = AlignUp(begin, kPageSize);
   end = AlignDown(end, kPageSize);
@@ -36,6 +43,7 @@
     }
     return result;
   }
+#endif
   return 0;
 }
 
@@ -49,6 +57,11 @@
 }
 
 void DexLayoutSections::Madvise(const DexFile* dex_file, MadviseState state) const {
+#ifdef _WIN32
+  UNUSED(dex_file);
+  UNUSED(state);
+  PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
   // The dex file is already defaulted to random access everywhere.
   for (const DexLayoutSection& section : sections_) {
     switch (state) {
@@ -78,6 +91,7 @@
       }
     }
   }
+#endif
 }
 
 std::ostream& operator<<(std::ostream& os, const DexLayoutSection& section) {
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 400c32b..a719d41 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -25,10 +25,6 @@
 #include "standard_dex_file.h"
 #include "ziparchive/zip_archive.h"
 
-// system/core/zip_archive definitions.
-struct ZipEntry;
-typedef void* ZipArchiveHandle;
-
 namespace art {
 
 namespace {
@@ -191,12 +187,18 @@
   std::string base_location = GetBaseLocation(dex_location);
   const char* suffix = dex_location + base_location.size();
   DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+#ifdef _WIN32
+  // Warning: No symbolic link processing here.
+  PLOG(WARNING) << "realpath is unsupported on Windows.";
+#else
   // Warning: Bionic implementation of realpath() allocates > 12KB on the stack.
   // Do not run this code on a small stack, e.g. in signal handler.
   UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
   if (path != nullptr && path.get() != base_location) {
     return std::string(path.get()) + suffix;
-  } else if (suffix[0] == 0) {
+  }
+#endif
+  if (suffix[0] == 0) {
     return base_location;
   } else {
     return dex_location;
@@ -216,26 +218,28 @@
   return false;
 }
 
-std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
-                                                   size_t size,
-                                                   const std::string& location,
-                                                   uint32_t location_checksum,
-                                                   const OatDexFile* oat_dex_file,
-                                                   bool verify,
-                                                   bool verify_checksum,
-                                                   std::string* error_msg) const {
+std::unique_ptr<const DexFile> DexFileLoader::Open(
+    const uint8_t* base,
+    size_t size,
+    const std::string& location,
+    uint32_t location_checksum,
+    const OatDexFile* oat_dex_file,
+    bool verify,
+    bool verify_checksum,
+    std::string* error_msg,
+    std::unique_ptr<DexFileContainer> container) const {
   return OpenCommon(base,
                     size,
-                    /*data_base*/ nullptr,
-                    /*data_size*/ 0,
+                    /*data_base=*/ nullptr,
+                    /*data_size=*/ 0,
                     location,
                     location_checksum,
                     oat_dex_file,
                     verify,
                     verify_checksum,
                     error_msg,
-                    /*container*/ nullptr,
-                    /*verify_result*/ nullptr);
+                    std::move(container),
+                    /*verify_result=*/ nullptr);
 }
 
 std::unique_ptr<const DexFile> DexFileLoader::OpenWithDataSection(
@@ -259,8 +263,8 @@
                     verify,
                     verify_checksum,
                     error_msg,
-                    /*container*/ nullptr,
-                    /*verify_result*/ nullptr);
+                    /*container=*/ nullptr,
+                    /*verify_result=*/ nullptr);
 }
 
 bool DexFileLoader::OpenAll(
@@ -294,7 +298,7 @@
                                                  size,
                                                  location,
                                                  dex_header->checksum_,
-                                                 /*oat_dex_file*/ nullptr,
+                                                 /*oat_dex_file=*/ nullptr,
                                                  verify,
                                                  verify_checksum,
                                                  error_msg));
@@ -414,11 +418,11 @@
   std::unique_ptr<const DexFile> dex_file = OpenCommon(
       map.data(),
       map.size(),
-      /*data_base*/ nullptr,
-      /*data_size*/ 0u,
+      /*data_base=*/ nullptr,
+      /*data_size=*/ 0u,
       location,
       zip_entry->GetCrc32(),
-      /*oat_dex_file*/ nullptr,
+      /*oat_dex_file=*/ nullptr,
       verify,
       verify_checksum,
       error_msg,
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 8fc836e..49e177f 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -121,14 +121,16 @@
                                     bool* zip_file_only_contains_uncompress_dex = nullptr) const;
 
   // Opens .dex file, backed by existing memory
-  virtual std::unique_ptr<const DexFile> Open(const uint8_t* base,
-                                              size_t size,
-                                              const std::string& location,
-                                              uint32_t location_checksum,
-                                              const OatDexFile* oat_dex_file,
-                                              bool verify,
-                                              bool verify_checksum,
-                                              std::string* error_msg) const;
+  virtual std::unique_ptr<const DexFile> Open(
+      const uint8_t* base,
+      size_t size,
+      const std::string& location,
+      uint32_t location_checksum,
+      const OatDexFile* oat_dex_file,
+      bool verify,
+      bool verify_checksum,
+      std::string* error_msg,
+      std::unique_ptr<DexFileContainer> container = nullptr) const;
 
   // Open a dex file with a separate data section.
   virtual std::unique_ptr<const DexFile> OpenWithDataSection(
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index 5bb01dd..8b7ca17 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -221,7 +221,7 @@
   bool success = dex_file_loader.OpenAll(dex_bytes->data(),
                                          dex_bytes->size(),
                                          location,
-                                         /* verify */ true,
+                                         /* verify= */ true,
                                          kVerifyChecksum,
                                          error_code,
                                          error_msg,
@@ -256,9 +256,9 @@
                                                                dex_bytes->size(),
                                                                location,
                                                                location_checksum,
-                                                               /* oat_dex_file */ nullptr,
-                                                               /* verify */ true,
-                                                               /* verify_checksum */ true,
+                                                               /* oat_dex_file= */ nullptr,
+                                                               /* verify= */ true,
+                                                               /* verify_checksum= */ true,
                                                                &error_message));
   if (expect_success) {
     CHECK(dex_file != nullptr) << error_message;
@@ -348,7 +348,7 @@
   ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
                                        dex_bytes.size(),
                                        kLocationString,
-                                       /* verify */ true,
+                                       /* verify= */ true,
                                        kVerifyChecksum,
                                        &error_code,
                                        &error_msg,
@@ -367,7 +367,7 @@
   ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
                                        dex_bytes.size(),
                                        kLocationString,
-                                       /* verify */ true,
+                                       /* verify= */ true,
                                        kVerifyChecksum,
                                        &error_code,
                                        &error_msg,
@@ -386,7 +386,7 @@
   ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
                                        dex_bytes.size(),
                                        kLocationString,
-                                       /* verify */ true,
+                                       /* verify= */ true,
                                        kVerifyChecksum,
                                        &error_code,
                                        &error_msg,
@@ -480,10 +480,6 @@
   EXPECT_EQ(raw->StringByTypeIdx(idx), nullptr);
 }
 
-static void Callback(void* context ATTRIBUTE_UNUSED,
-                     const DexFile::LocalInfo& entry ATTRIBUTE_UNUSED) {
-}
-
 TEST_F(DexFileLoaderTest, OpenDexDebugInfoLocalNullType) {
   std::vector<uint8_t> dex_bytes;
   std::unique_ptr<const DexFile> raw = OpenDexFileInMemoryBase64(kRawDexDebugInfoLocalNullType,
@@ -491,12 +487,11 @@
                                                                  0xf25f2b38U,
                                                                  true,
                                                                  &dex_bytes);
-  const DexFile::ClassDef& class_def = raw->GetClassDef(0);
+  const dex::ClassDef& class_def = raw->GetClassDef(0);
   constexpr uint32_t kMethodIdx = 1;
-  const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def,
-                                                                                kMethodIdx));
+  const dex::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def, kMethodIdx));
   CodeItemDebugInfoAccessor accessor(*raw, code_item, kMethodIdx);
-  ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, Callback, nullptr));
+  ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, VoidFunctor()));
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/dex_file_structs.h b/libdexfile/dex/dex_file_structs.h
new file mode 100644
index 0000000..2d25227
--- /dev/null
+++ b/libdexfile/dex/dex_file_structs.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
+#define ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+
+#include <inttypes.h>
+
+#include "dex_file_types.h"
+#include "modifiers.h"
+
+namespace art {
+
+class DexWriter;
+
+namespace dex {
+
+struct MapItem {
+  uint16_t type_;
+  uint16_t unused_;
+  uint32_t size_;
+  uint32_t offset_;
+};
+
+struct MapList {
+  uint32_t size_;
+  MapItem list_[1];
+
+  size_t Size() const { return sizeof(uint32_t) + (size_ * sizeof(MapItem)); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MapList);
+};
+
+// Raw string_id_item.
+struct StringId {
+  uint32_t string_data_off_;  // offset in bytes from the base address
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringId);
+};
+
+// Raw type_id_item.
+struct TypeId {
+  dex::StringIndex descriptor_idx_;  // index into string_ids
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TypeId);
+};
+
+// Raw field_id_item.
+struct FieldId {
+  dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
+  dex::TypeIndex type_idx_;    // index into type_ids_ array for field type
+  dex::StringIndex name_idx_;  // index into string_ids_ array for field name
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldId);
+};
+
+// Raw proto_id_item.
+struct ProtoId {
+  dex::StringIndex shorty_idx_;     // index into string_ids array for shorty descriptor
+  dex::TypeIndex return_type_idx_;  // index into type_ids array for return type
+  uint16_t pad_;                    // padding = 0
+  uint32_t parameters_off_;         // file offset to type_list for parameter types
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ProtoId);
+};
+
+// Raw method_id_item.
+struct MethodId {
+  dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
+  dex::ProtoIndex proto_idx_;  // index into proto_ids_ array for method prototype
+  dex::StringIndex name_idx_;  // index into string_ids_ array for method name
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodId);
+};
+
+// Base code_item, compact dex and standard dex have different code item layouts.
+struct CodeItem {
+ protected:
+  CodeItem() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CodeItem);
+};
+
+// Raw class_def_item.
+struct ClassDef {
+  dex::TypeIndex class_idx_;  // index into type_ids_ array for this class
+  uint16_t pad1_;  // padding = 0
+  uint32_t access_flags_;
+  dex::TypeIndex superclass_idx_;  // index into type_ids_ array for superclass
+  uint16_t pad2_;  // padding = 0
+  uint32_t interfaces_off_;  // file offset to TypeList
+  dex::StringIndex source_file_idx_;  // index into string_ids_ for source file name
+  uint32_t annotations_off_;  // file offset to annotations_directory_item
+  uint32_t class_data_off_;  // file offset to class_data_item
+  uint32_t static_values_off_;  // file offset to EncodedArray
+
+  // Returns the valid access flags, that is, Java modifier bits relevant to the ClassDef type
+  // (class or interface). These are all in the lower 16b and do not contain runtime flags.
+  uint32_t GetJavaAccessFlags() const {
+    // Make sure that none of our runtime-only flags are set.
+    static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
+                  "Valid class flags not a subset of Java flags");
+    static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
+                  "Valid interface flags not a subset of Java flags");
+
+    if ((access_flags_ & kAccInterface) != 0) {
+      // Interface.
+      return access_flags_ & kAccValidInterfaceFlags;
+    } else {
+      // Class.
+      return access_flags_ & kAccValidClassFlags;
+    }
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ClassDef);
+};
+
+// Raw type_item.
+struct TypeItem {
+  dex::TypeIndex type_idx_;  // index into type_ids section
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TypeItem);
+};
+
+// Raw type_list.
+class TypeList {
+ public:
+  uint32_t Size() const {
+    return size_;
+  }
+
+  const TypeItem& GetTypeItem(uint32_t idx) const {
+    DCHECK_LT(idx, this->size_);
+    return this->list_[idx];
+  }
+
+  // Size in bytes of the part of the list that is common.
+  static constexpr size_t GetHeaderSize() {
+    return 4U;
+  }
+
+  // Size in bytes of the whole type list including all the stored elements.
+  static constexpr size_t GetListSize(size_t count) {
+    return GetHeaderSize() + sizeof(TypeItem) * count;
+  }
+
+ private:
+  uint32_t size_;  // size of the list, in entries
+  TypeItem list_[1];  // elements of the list
+  DISALLOW_COPY_AND_ASSIGN(TypeList);
+};
+
+// Raw method_handle_item.
+struct MethodHandleItem {
+  uint16_t method_handle_type_;
+  uint16_t reserved1_;            // Reserved for future use.
+  uint16_t field_or_method_idx_;  // Field index for accessors, method index otherwise.
+  uint16_t reserved2_;            // Reserved for future use.
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodHandleItem);
+};
+
+// Raw call_site_id_item.
+struct CallSiteIdItem {
+  uint32_t data_off_;  // Offset into data section pointing to encoded array items.
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
+};
+
+// Raw try_item.
+struct TryItem {
+  static constexpr size_t kAlignment = sizeof(uint32_t);
+
+  uint32_t start_addr_;
+  uint16_t insn_count_;
+  uint16_t handler_off_;
+
+ private:
+  TryItem() = default;
+  friend class ::art::DexWriter;
+  DISALLOW_COPY_AND_ASSIGN(TryItem);
+};
+
+struct AnnotationsDirectoryItem {
+  uint32_t class_annotations_off_;
+  uint32_t fields_size_;
+  uint32_t methods_size_;
+  uint32_t parameters_size_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
+};
+
+struct FieldAnnotationsItem {
+  uint32_t field_idx_;
+  uint32_t annotations_off_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAnnotationsItem);
+};
+
+struct MethodAnnotationsItem {
+  uint32_t method_idx_;
+  uint32_t annotations_off_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodAnnotationsItem);
+};
+
+struct ParameterAnnotationsItem {
+  uint32_t method_idx_;
+  uint32_t annotations_off_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ParameterAnnotationsItem);
+};
+
+struct AnnotationSetRefItem {
+  uint32_t annotations_off_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefItem);
+};
+
+struct AnnotationSetRefList {
+  uint32_t size_;
+  AnnotationSetRefItem list_[1];
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList);
+};
+
+struct AnnotationSetItem {
+  uint32_t size_;
+  uint32_t entries_[1];
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
+};
+
+struct AnnotationItem {
+  uint8_t visibility_;
+  uint8_t annotation_[1];
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
+};
+
+struct HiddenapiClassData {
+  uint32_t size_;             // total size of the item
+  uint32_t flags_offset_[1];  // array of offsets from the beginning of this item,
+                              // indexed by class def index
+
+  // Returns a pointer to the beginning of a uleb128-stream of hiddenapi
+  // flags for a class def of given index. Values are in the same order
+  // as fields/methods in the class data. Returns null if the class does
+  // not have class data.
+  const uint8_t* GetFlagsPointer(uint32_t class_def_idx) const {
+    if (flags_offset_[class_def_idx] == 0) {
+      return nullptr;
+    } else {
+      return reinterpret_cast<const uint8_t*>(this) + flags_offset_[class_def_idx];
+    }
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HiddenapiClassData);
+};
+
+}  // namespace dex
+}  // namespace art
+
+#endif  // ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
diff --git a/libdexfile/dex/dex_file_tracking_registrar.cc b/libdexfile/dex/dex_file_tracking_registrar.cc
index 29ff6be..1903dc9 100644
--- a/libdexfile/dex/dex_file_tracking_registrar.cc
+++ b/libdexfile/dex/dex_file_tracking_registrar.cc
@@ -158,7 +158,7 @@
 void DexFileTrackingRegistrar::SetAllCodeItemRegistration(bool should_poison) {
   for (ClassAccessor accessor : dex_file_->GetClasses()) {
     for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-      const DexFile::CodeItem* code_item = method.GetCodeItem();
+      const dex::CodeItem* code_item = method.GetCodeItem();
       if (code_item != nullptr) {
         const void* code_item_begin = reinterpret_cast<const void*>(code_item);
         size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
@@ -171,7 +171,7 @@
 void DexFileTrackingRegistrar::SetAllCodeItemStartRegistration(bool should_poison) {
   for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
     for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
-      const DexFile::CodeItem* code_item = method.GetCodeItem();
+      const dex::CodeItem* code_item = method.GetCodeItem();
       if (code_item != nullptr) {
         const void* code_item_begin = reinterpret_cast<const void*>(code_item);
         size_t code_item_start = reinterpret_cast<size_t>(code_item);
@@ -189,7 +189,7 @@
 void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
   for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
     for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
-      const DexFile::CodeItem* code_item = method.GetCodeItem();
+      const dex::CodeItem* code_item = method.GetCodeItem();
       if (code_item != nullptr) {
         CodeItemInstructionAccessor accessor(*dex_file_, code_item);
         const void* insns_begin = reinterpret_cast<const void*>(accessor.Insns());
@@ -204,9 +204,9 @@
 void DexFileTrackingRegistrar::SetCodeItemRegistration(const char* class_name, bool should_poison) {
   for (ClassAccessor accessor : dex_file_->GetClasses()) {
     for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-      const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(method.GetIndex());
+      const dex::MethodId& methodid_item = dex_file_->GetMethodId(method.GetIndex());
       const char * methodid_name = dex_file_->GetMethodName(methodid_item);
-      const DexFile::CodeItem* code_item = method.GetCodeItem();
+      const dex::CodeItem* code_item = method.GetCodeItem();
       if (code_item != nullptr && strcmp(methodid_name, class_name) == 0) {
         const void* code_item_begin = reinterpret_cast<const void*>(code_item);
         size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
@@ -218,7 +218,7 @@
 
 void DexFileTrackingRegistrar::SetAllStringDataStartRegistration(bool should_poison) {
   for (size_t stringid_ctr = 0; stringid_ctr < dex_file_->NumStringIds(); ++stringid_ctr) {
-    const DexFile::StringId & string_id = dex_file_->GetStringId(StringIndex(stringid_ctr));
+    const dex::StringId & string_id = dex_file_->GetStringId(StringIndex(stringid_ctr));
     const void* string_data_begin = reinterpret_cast<const void*>(dex_file_->Begin() + string_id.string_data_off_);
     // Data Section of String Data Item
     const void* string_data_data_begin = reinterpret_cast<const void*>(dex_file_->GetStringData(string_id));
@@ -229,11 +229,11 @@
 
 void DexFileTrackingRegistrar::SetAllStringDataRegistration(bool should_poison) {
   size_t map_offset = dex_file_->GetHeader().map_off_;
-  auto map_list = reinterpret_cast<const DexFile::MapList*>(dex_file_->Begin() + map_offset);
+  auto map_list = reinterpret_cast<const dex::MapList*>(dex_file_->Begin() + map_offset);
   for (size_t map_ctr = 0; map_ctr < map_list->size_; ++map_ctr) {
-    const DexFile::MapItem& map_item = map_list->list_[map_ctr];
+    const dex::MapItem& map_item = map_list->list_[map_ctr];
     if (map_item.type_ == DexFile::kDexTypeStringDataItem) {
-      const DexFile::MapItem& next_map_item = map_list->list_[map_ctr + 1];
+      const dex::MapItem& next_map_item = map_list->list_[map_ctr + 1];
       const void* string_data_begin = reinterpret_cast<const void*>(dex_file_->Begin() + map_item.offset_);
       size_t string_data_size = next_map_item.offset_ - map_item.offset_;
       range_values_.push_back(std::make_tuple(string_data_begin, string_data_size, should_poison));
diff --git a/libdexfile/dex/dex_file_types.h b/libdexfile/dex/dex_file_types.h
index d4fb3de..ecc0482 100644
--- a/libdexfile/dex/dex_file_types.h
+++ b/libdexfile/dex/dex_file_types.h
@@ -17,8 +17,9 @@
 #ifndef ART_LIBDEXFILE_DEX_DEX_FILE_TYPES_H_
 #define ART_LIBDEXFILE_DEX_DEX_FILE_TYPES_H_
 
+#include <iosfwd>
 #include <limits>
-#include <ostream>
+#include <utility>
 
 namespace art {
 namespace dex {
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index fd011c8..86a28e5 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -67,6 +67,7 @@
     case DexFile::kDexTypeAnnotationItem:           return 1 << 17;
     case DexFile::kDexTypeEncodedArrayItem:         return 1 << 18;
     case DexFile::kDexTypeAnnotationsDirectoryItem: return 1 << 19;
+    case DexFile::kDexTypeHiddenapiClassData:       return 1 << 20;
   }
   return 0;
 }
@@ -94,6 +95,7 @@
     case DexFile::kDexTypeAnnotationItem:
     case DexFile::kDexTypeEncodedArrayItem:
     case DexFile::kDexTypeAnnotationsDirectoryItem:
+    case DexFile::kDexTypeHiddenapiClassData:
       return true;
   }
   return true;
@@ -114,22 +116,22 @@
   return CheckLoadStringByIdx(dex_file_->GetTypeId(type_idx).descriptor_idx_, error_string);
 }
 
-const DexFile::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
+const dex::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
   if (UNLIKELY(!CheckIndex(idx, dex_file_->NumFieldIds(), error_string))) {
     return nullptr;
   }
   return &dex_file_->GetFieldId(idx);
 }
 
-const DexFile::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const char* err_string) {
+const dex::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const char* err_string) {
   if (UNLIKELY(!CheckIndex(idx, dex_file_->NumMethodIds(), err_string))) {
     return nullptr;
   }
   return &dex_file_->GetMethodId(idx);
 }
 
-const DexFile::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
-                                                          const char* err_string) {
+const dex::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
+                                                      const char* err_string) {
   if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumProtoIds(), err_string))) {
     return nullptr;
   }
@@ -152,14 +154,14 @@
 
 // Helper macro to load method id. Return last parameter on error.
 #define LOAD_METHOD(var, idx, error_string, error_stmt)                   \
-  const DexFile::MethodId* (var)  = CheckLoadMethodId(idx, error_string); \
+  const dex::MethodId* (var)  = CheckLoadMethodId(idx, error_string); \
   if (UNLIKELY((var) == nullptr)) {                                       \
     error_stmt;                                                           \
   }
 
 // Helper macro to load method id. Return last parameter on error.
 #define LOAD_FIELD(var, idx, fmt, error_stmt)                 \
-  const DexFile::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
+  const dex::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
   if (UNLIKELY((var) == nullptr)) {                           \
     error_stmt;                                               \
   }
@@ -341,55 +343,55 @@
   bool result =
       CheckValidOffsetAndSize(header_->link_off_,
                               header_->link_size_,
-                              0 /* unaligned */,
+                              /* alignment= */ 0,
                               "link") &&
       CheckValidOffsetAndSize(header_->map_off_,
                               header_->map_off_,
-                              4,
+                              /* alignment= */ 4,
                               "map") &&
       CheckValidOffsetAndSize(header_->string_ids_off_,
                               header_->string_ids_size_,
-                              4,
+                              /* alignment= */ 4,
                               "string-ids") &&
       CheckValidOffsetAndSize(header_->type_ids_off_,
                               header_->type_ids_size_,
-                              4,
+                              /* alignment= */ 4,
                               "type-ids") &&
       CheckSizeLimit(header_->type_ids_size_, DexFile::kDexNoIndex16, "type-ids") &&
       CheckValidOffsetAndSize(header_->proto_ids_off_,
                               header_->proto_ids_size_,
-                              4,
+                              /* alignment= */ 4,
                               "proto-ids") &&
       CheckSizeLimit(header_->proto_ids_size_, DexFile::kDexNoIndex16, "proto-ids") &&
       CheckValidOffsetAndSize(header_->field_ids_off_,
                               header_->field_ids_size_,
-                              4,
+                              /* alignment= */ 4,
                               "field-ids") &&
       CheckValidOffsetAndSize(header_->method_ids_off_,
                               header_->method_ids_size_,
-                              4,
+                              /* alignment= */ 4,
                               "method-ids") &&
       CheckValidOffsetAndSize(header_->class_defs_off_,
                               header_->class_defs_size_,
-                              4,
+                              /* alignment= */ 4,
                               "class-defs") &&
       CheckValidOffsetAndSize(header_->data_off_,
                               header_->data_size_,
-                              0,  // Unaligned, spec doesn't talk about it, even though size
-                                  // is supposed to be a multiple of 4.
+                              // Unaligned, spec doesn't talk about it, even though size
+                              // is supposed to be a multiple of 4.
+                              /* alignment= */ 0,
                               "data");
   return result;
 }
 
 bool DexFileVerifier::CheckMap() {
-  const DexFile::MapList* map = reinterpret_cast<const DexFile::MapList*>(begin_ +
-                                                                          header_->map_off_);
+  const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
   // Check that map list content is available.
-  if (!CheckListSize(map, 1, sizeof(DexFile::MapList), "maplist content")) {
+  if (!CheckListSize(map, 1, sizeof(dex::MapList), "maplist content")) {
     return false;
   }
 
-  const DexFile::MapItem* item = map->list_;
+  const dex::MapItem* item = map->list_;
 
   uint32_t count = map->size_;
   uint32_t last_offset = 0;
@@ -399,7 +401,7 @@
   uint32_t used_bits = 0;
 
   // Sanity check the size of the map list.
-  if (!CheckListSize(item, count, sizeof(DexFile::MapItem), "map size")) {
+  if (!CheckListSize(item, count, sizeof(dex::MapItem), "map size")) {
     return false;
   }
 
@@ -523,8 +525,9 @@
     return false;                                                   \
   }
 
-bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
-                                                uint32_t* handler_offsets, uint32_t handlers_size) {
+bool DexFileVerifier::CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
+                                                uint32_t* handler_offsets,
+                                                uint32_t handlers_size) {
   CodeItemDataAccessor accessor(*dex_file_, code_item);
   const uint8_t* handlers_base = accessor.GetCatchHandlerData();
 
@@ -584,8 +587,7 @@
 
   // Check that it's the right class.
   dex::TypeIndex my_class_index =
-      (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + idx)->
-          class_idx_;
+      (reinterpret_cast<const dex::FieldId*>(begin_ + header_->field_ids_off_) + idx)->class_idx_;
   if (class_type_index != my_class_index) {
     ErrorStringPrintf("Field's class index unexpected, %" PRIu16 "vs %" PRIu16,
                       my_class_index.index_,
@@ -622,8 +624,8 @@
     return false;
   }
 
-  const DexFile::MethodId& method_id =
-      *(reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + idx);
+  const dex::MethodId& method_id =
+      *(reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + idx);
 
   // Check that it's the right class.
   dex::TypeIndex my_class_index = method_id.class_idx_;
@@ -866,7 +868,7 @@
 bool DexFileVerifier::CheckEncodedArray() {
   DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
 
-  while (size--) {
+  for (; size != 0u; --size) {
     if (!CheckEncodedValue()) {
       failure_reason_ = StringPrintf("Bad encoded_array value: %s", failure_reason_.c_str());
       return false;
@@ -908,7 +910,7 @@
 bool DexFileVerifier::FindClassIndexAndDef(uint32_t index,
                                            bool is_field,
                                            dex::TypeIndex* class_type_index,
-                                           const DexFile::ClassDef** output_class_def) {
+                                           const dex::ClassDef** output_class_def) {
   DCHECK(class_type_index != nullptr);
   DCHECK(output_class_def != nullptr);
 
@@ -920,11 +922,11 @@
   // Next get the type index.
   if (is_field) {
     *class_type_index =
-        (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + index)->
+        (reinterpret_cast<const dex::FieldId*>(begin_ + header_->field_ids_off_) + index)->
             class_idx_;
   } else {
     *class_type_index =
-        (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + index)->
+        (reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + index)->
             class_idx_;
   }
 
@@ -935,10 +937,10 @@
 
   // Now search for the class def. This is basically a specialized version of the DexFile code, as
   // we should not trust that this is a valid DexFile just yet.
-  const DexFile::ClassDef* class_def_begin =
-      reinterpret_cast<const DexFile::ClassDef*>(begin_ + header_->class_defs_off_);
+  const dex::ClassDef* class_def_begin =
+      reinterpret_cast<const dex::ClassDef*>(begin_ + header_->class_defs_off_);
   for (size_t i = 0; i < header_->class_defs_size_; ++i) {
-    const DexFile::ClassDef* class_def = class_def_begin + i;
+    const dex::ClassDef* class_def = class_def_begin + i;
     if (class_def->class_idx_ == *class_type_index) {
       *output_class_def = class_def;
       return true;
@@ -962,7 +964,7 @@
   return true;
 }
 
-bool DexFileVerifier::CheckStaticFieldTypes(const DexFile::ClassDef* class_def) {
+bool DexFileVerifier::CheckStaticFieldTypes(const dex::ClassDef* class_def) {
   if (class_def == nullptr) {
     return true;
   }
@@ -975,7 +977,7 @@
       break;
     }
     uint32_t index = field.GetIndex();
-    const DexFile::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
+    const dex::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
     const char* field_type_name =
         dex_file_->GetStringData(dex_file_->GetStringId(type_id.descriptor_idx_));
     Primitive::Type field_type = Primitive::GetType(field_type_name[0]);
@@ -1066,7 +1068,7 @@
                                                     ClassAccessor::Field* field,
                                                     bool* have_class,
                                                     dex::TypeIndex* class_type_index,
-                                                    const DexFile::ClassDef** class_def) {
+                                                    const dex::ClassDef** class_def) {
   DCHECK(field != nullptr);
   constexpr const char* kTypeDescr = kStatic ? "static field" : "instance field";
 
@@ -1095,7 +1097,7 @@
       return false;
     }
     if (!CheckClassDataItemField(curr_index,
-                                 field->GetRawAccessFlags(),
+                                 field->GetAccessFlags(),
                                  (*class_def)->access_flags_,
                                  *class_type_index,
                                  kStatic)) {
@@ -1118,7 +1120,7 @@
                                                      size_t num_directs,
                                                      bool* have_class,
                                                      dex::TypeIndex* class_type_index,
-                                                     const DexFile::ClassDef** class_def) {
+                                                     const dex::ClassDef** class_def) {
   DCHECK(method != nullptr);
   const char* kTypeDescr = method->IsStaticOrDirect() ? "direct method" : "virtual method";
 
@@ -1146,7 +1148,7 @@
       return false;
     }
     if (!CheckClassDataItemMethod(curr_index,
-                                  method->GetRawAccessFlags(),
+                                  method->GetAccessFlags(),
                                   (*class_def)->access_flags_,
                                   *class_type_index,
                                   method->GetCodeItemOffset(),
@@ -1173,7 +1175,7 @@
   // as the lookup is expensive, cache the result.
   bool have_class = false;
   dex::TypeIndex class_type_index;
-  const DexFile::ClassDef* class_def = nullptr;
+  const dex::ClassDef* class_def = nullptr;
 
   ClassAccessor::Field field(*dex_file_, accessor.ptr_pos_);
   // Check fields.
@@ -1197,7 +1199,7 @@
   ClassAccessor::Method method(*dex_file_, field.ptr_pos_);
   if (!CheckIntraClassDataItemMethods(&method,
                                       accessor.NumDirectMethods(),
-                                      nullptr /* direct_it */,
+                                      /* direct_method= */ nullptr,
                                       0u,
                                       &have_class,
                                       &class_type_index,
@@ -1229,8 +1231,8 @@
 }
 
 bool DexFileVerifier::CheckIntraCodeItem() {
-  const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(ptr_);
-  if (!CheckListSize(code_item, 1, sizeof(DexFile::CodeItem), "code")) {
+  const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(ptr_);
+  if (!CheckListSize(code_item, 1, sizeof(dex::CodeItem), "code")) {
     return false;
   }
 
@@ -1272,8 +1274,8 @@
     return false;
   }
 
-  const DexFile::TryItem* try_items = accessor.TryItems().begin();
-  if (!CheckListSize(try_items, try_items_size, sizeof(DexFile::TryItem), "try_items size")) {
+  const dex::TryItem* try_items = accessor.TryItems().begin();
+  if (!CheckListSize(try_items, try_items_size, sizeof(dex::TryItem), "try_items size")) {
     return false;
   }
 
@@ -1304,7 +1306,7 @@
   }
 
   uint32_t last_addr = 0;
-  while (try_items_size--) {
+  for (; try_items_size != 0u; --try_items_size) {
     if (UNLIKELY(try_items->start_addr_ < last_addr)) {
       ErrorStringPrintf("Out-of_order try_item with start_addr: %x", try_items->start_addr_);
       return false;
@@ -1554,25 +1556,129 @@
   return true;
 }
 
+bool DexFileVerifier::CheckIntraHiddenapiClassData() {
+  const dex::HiddenapiClassData* item = reinterpret_cast<const dex::HiddenapiClassData*>(ptr_);
+
+  // Check expected header size.
+  uint32_t num_header_elems = dex_file_->NumClassDefs() + 1;
+  uint32_t elem_size = sizeof(uint32_t);
+  uint32_t header_size = num_header_elems * elem_size;
+  if (!CheckListSize(item, num_header_elems, elem_size, "hiddenapi class data section header")) {
+    return false;
+  }
+
+  // Check total size.
+  if (!CheckListSize(item, item->size_, 1u, "hiddenapi class data section")) {
+    return false;
+  }
+
+  // Check that total size can fit header.
+  if (item->size_ < header_size) {
+    ErrorStringPrintf(
+        "Hiddenapi class data too short to store header (%u < %u)", item->size_, header_size);
+    return false;
+  }
+
+  const uint8_t* data_end = ptr_ + item->size_;
+  ptr_ += header_size;
+
+  // Check offsets for each class def.
+  for (uint32_t i = 0; i < dex_file_->NumClassDefs(); ++i) {
+    const dex::ClassDef& class_def = dex_file_->GetClassDef(i);
+    const uint8_t* class_data = dex_file_->GetClassData(class_def);
+    uint32_t offset = item->flags_offset_[i];
+
+    if (offset == 0) {
+      continue;
+    }
+
+    // Check that class defs with no class data do not have any hiddenapi class data.
+    if (class_data == nullptr) {
+      ErrorStringPrintf(
+          "Hiddenapi class data offset not zero for class def %u with no class data", i);
+      return false;
+    }
+
+    // Check that the offset is within the section.
+    if (offset > item->size_) {
+      ErrorStringPrintf(
+          "Hiddenapi class data offset out of section bounds (%u > %u) for class def %u",
+          offset, item->size_, i);
+      return false;
+    }
+
+    // Check that the offset matches current pointer position. We do not allow
+    // offsets into already parsed data, or gaps between class def data.
+    uint32_t ptr_offset = ptr_ - reinterpret_cast<const uint8_t*>(item);
+    if (offset != ptr_offset) {
+      ErrorStringPrintf(
+          "Hiddenapi class data unexpected offset (%u != %u) for class def %u",
+          offset, ptr_offset, i);
+      return false;
+    }
+
+    // Parse a uleb128 value for each field and method of this class.
+    bool failure = false;
+    auto fn_member = [&](const ClassAccessor::BaseItem& member, const char* member_type) {
+      if (failure) {
+        return;
+      }
+      uint32_t decoded_flags;
+      if (!DecodeUnsignedLeb128Checked(&ptr_, data_end, &decoded_flags)) {
+        ErrorStringPrintf("Hiddenapi class data value out of bounds (%p > %p) for %s %i",
+                          ptr_, data_end, member_type, member.GetIndex());
+        failure = true;
+        return;
+      }
+      if (!hiddenapi::ApiList(decoded_flags).IsValid()) {
+        ErrorStringPrintf("Hiddenapi class data flags invalid (%u) for %s %i",
+                          decoded_flags, member_type, member.GetIndex());
+        failure = true;
+        return;
+      }
+    };
+    auto fn_field = [&](const ClassAccessor::Field& field) { fn_member(field, "field"); };
+    auto fn_method = [&](const ClassAccessor::Method& method) { fn_member(method, "method"); };
+    ClassAccessor accessor(*dex_file_, class_data);
+    accessor.VisitFieldsAndMethods(fn_field, fn_field, fn_method, fn_method);
+    if (failure) {
+      return false;
+    }
+  }
+
+  if (ptr_ != data_end) {
+    ErrorStringPrintf("Hiddenapi class data wrong reported size (%u != %u)",
+                       static_cast<uint32_t>(ptr_ - reinterpret_cast<const uint8_t*>(item)),
+                       item->size_);
+    return false;
+  }
+
+  return true;
+}
+
 bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
-  const DexFile::AnnotationsDirectoryItem* item =
-      reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
-  if (!CheckListSize(item, 1, sizeof(DexFile::AnnotationsDirectoryItem), "annotations_directory")) {
+  const dex::AnnotationsDirectoryItem* item =
+      reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr_);
+  if (!CheckListSize(item, 1, sizeof(dex::AnnotationsDirectoryItem), "annotations_directory")) {
     return false;
   }
 
   // Field annotations follow immediately after the annotations directory.
-  const DexFile::FieldAnnotationsItem* field_item =
-      reinterpret_cast<const DexFile::FieldAnnotationsItem*>(item + 1);
+  const dex::FieldAnnotationsItem* field_item =
+      reinterpret_cast<const dex::FieldAnnotationsItem*>(item + 1);
   uint32_t field_count = item->fields_size_;
-  if (!CheckListSize(field_item, field_count, sizeof(DexFile::FieldAnnotationsItem), "field_annotations list")) {
+  if (!CheckListSize(field_item,
+                     field_count,
+                     sizeof(dex::FieldAnnotationsItem),
+                     "field_annotations list")) {
     return false;
   }
 
   uint32_t last_idx = 0;
   for (uint32_t i = 0; i < field_count; i++) {
     if (UNLIKELY(last_idx >= field_item->field_idx_ && i != 0)) {
-      ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x", last_idx, field_item->field_idx_);
+      ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x",
+                        last_idx, field_item->field_idx_);
       return false;
     }
     last_idx = field_item->field_idx_;
@@ -1580,10 +1686,13 @@
   }
 
   // Method annotations follow immediately after field annotations.
-  const DexFile::MethodAnnotationsItem* method_item =
-      reinterpret_cast<const DexFile::MethodAnnotationsItem*>(field_item);
+  const dex::MethodAnnotationsItem* method_item =
+      reinterpret_cast<const dex::MethodAnnotationsItem*>(field_item);
   uint32_t method_count = item->methods_size_;
-  if (!CheckListSize(method_item, method_count, sizeof(DexFile::MethodAnnotationsItem), "method_annotations list")) {
+  if (!CheckListSize(method_item,
+                     method_count,
+                     sizeof(dex::MethodAnnotationsItem),
+                     "method_annotations list")) {
     return false;
   }
 
@@ -1599,10 +1708,10 @@
   }
 
   // Parameter annotations follow immediately after method annotations.
-  const DexFile::ParameterAnnotationsItem* parameter_item =
-      reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
+  const dex::ParameterAnnotationsItem* parameter_item =
+      reinterpret_cast<const dex::ParameterAnnotationsItem*>(method_item);
   uint32_t parameter_count = item->parameters_size_;
-  if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem),
+  if (!CheckListSize(parameter_item, parameter_count, sizeof(dex::ParameterAnnotationsItem),
                      "parameter_annotations list")) {
     return false;
   }
@@ -1653,69 +1762,69 @@
     const uint8_t* start_ptr = ptr_;
     switch (kType) {
       case DexFile::kDexTypeStringIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::StringId), "string_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::StringId), "string_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::StringId);
+        ptr_ += sizeof(dex::StringId);
         break;
       }
       case DexFile::kDexTypeTypeIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::TypeId), "type_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::TypeId), "type_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::TypeId);
+        ptr_ += sizeof(dex::TypeId);
         break;
       }
       case DexFile::kDexTypeProtoIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::ProtoId), "proto_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::ProtoId), "proto_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::ProtoId);
+        ptr_ += sizeof(dex::ProtoId);
         break;
       }
       case DexFile::kDexTypeFieldIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::FieldId), "field_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::FieldId), "field_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::FieldId);
+        ptr_ += sizeof(dex::FieldId);
         break;
       }
       case DexFile::kDexTypeMethodIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodId), "method_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::MethodId), "method_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::MethodId);
+        ptr_ += sizeof(dex::MethodId);
         break;
       }
       case DexFile::kDexTypeClassDefItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::ClassDef), "class_defs")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::ClassDef), "class_defs")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::ClassDef);
+        ptr_ += sizeof(dex::ClassDef);
         break;
       }
       case DexFile::kDexTypeCallSiteIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::CallSiteIdItem), "call_site_ids")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::CallSiteIdItem), "call_site_ids")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::CallSiteIdItem);
+        ptr_ += sizeof(dex::CallSiteIdItem);
         break;
       }
       case DexFile::kDexTypeMethodHandleItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodHandleItem), "method_handles")) {
+        if (!CheckListSize(ptr_, 1, sizeof(dex::MethodHandleItem), "method_handles")) {
           return false;
         }
-        ptr_ += sizeof(DexFile::MethodHandleItem);
+        ptr_ += sizeof(dex::MethodHandleItem);
         break;
       }
       case DexFile::kDexTypeTypeList: {
-        if (!CheckList(sizeof(DexFile::TypeItem), "type_list", &ptr_)) {
+        if (!CheckList(sizeof(dex::TypeItem), "type_list", &ptr_)) {
           return false;
         }
         break;
       }
       case DexFile::kDexTypeAnnotationSetRefList: {
-        if (!CheckList(sizeof(DexFile::AnnotationSetRefItem), "annotation_set_ref_list", &ptr_)) {
+        if (!CheckList(sizeof(dex::AnnotationSetRefItem), "annotation_set_ref_list", &ptr_)) {
           return false;
         }
         break;
@@ -1768,6 +1877,12 @@
         }
         break;
       }
+      case DexFile::kDexTypeHiddenapiClassData: {
+        if (!CheckIntraHiddenapiClassData()) {
+          return false;
+        }
+        break;
+      }
       case DexFile::kDexTypeHeaderItem:
       case DexFile::kDexTypeMapList:
         break;
@@ -1876,15 +1991,14 @@
 }
 
 bool DexFileVerifier::CheckIntraSection() {
-  const DexFile::MapList* map =
-      reinterpret_cast<const DexFile::MapList*>(begin_ + header_->map_off_);
-  const DexFile::MapItem* item = map->list_;
+  const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
+  const dex::MapItem* item = map->list_;
   size_t offset = 0;
   uint32_t count = map->size_;
   ptr_ = begin_;
 
   // Check the items listed in the map.
-  while (count--) {
+  for (; count != 0u; --count) {
     const size_t current_offset = offset;
     uint32_t section_offset = item->offset_;
     uint32_t section_count = item->size_;
@@ -1942,8 +2056,8 @@
                             section_offset, header_->map_off_);
           return false;
         }
-        ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
-        offset = section_offset + sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
+        ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(dex::MapItem));
+        offset = section_offset + sizeof(uint32_t) + (map->size_ * sizeof(dex::MapItem));
         break;
 
 #define CHECK_INTRA_SECTION_ITERATE_CASE(type)                              \
@@ -1972,6 +2086,7 @@
       CHECK_INTRA_DATA_SECTION_CASE(DexFile::kDexTypeAnnotationItem)
       CHECK_INTRA_DATA_SECTION_CASE(DexFile::kDexTypeEncodedArrayItem)
       CHECK_INTRA_DATA_SECTION_CASE(DexFile::kDexTypeAnnotationsDirectoryItem)
+      CHECK_INTRA_DATA_SECTION_CASE(DexFile::kDexTypeHiddenapiClassData)
 #undef CHECK_INTRA_DATA_SECTION_CASE
     }
 
@@ -2026,26 +2141,26 @@
 
 dex::TypeIndex DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr,
                                                                      bool* success) {
-  const DexFile::AnnotationsDirectoryItem* item =
-      reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr);
+  const dex::AnnotationsDirectoryItem* item =
+      reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr);
   *success = true;
 
   if (item->fields_size_ != 0) {
-    DexFile::FieldAnnotationsItem* field_items = (DexFile::FieldAnnotationsItem*) (item + 1);
+    dex::FieldAnnotationsItem* field_items = (dex::FieldAnnotationsItem*) (item + 1);
     LOAD_FIELD(field, field_items[0].field_idx_, "first_annotations_dir_definer field_id",
                *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
     return field->class_idx_;
   }
 
   if (item->methods_size_ != 0) {
-    DexFile::MethodAnnotationsItem* method_items = (DexFile::MethodAnnotationsItem*) (item + 1);
+    dex::MethodAnnotationsItem* method_items = (dex::MethodAnnotationsItem*) (item + 1);
     LOAD_METHOD(method, method_items[0].method_idx_, "first_annotations_dir_definer method id",
                 *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
     return method->class_idx_;
   }
 
   if (item->parameters_size_ != 0) {
-    DexFile::ParameterAnnotationsItem* parameter_items = (DexFile::ParameterAnnotationsItem*) (item + 1);
+    dex::ParameterAnnotationsItem* parameter_items = (dex::ParameterAnnotationsItem*) (item + 1);
     LOAD_METHOD(method, parameter_items[0].method_idx_, "first_annotations_dir_definer method id",
                 *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
     return method->class_idx_;
@@ -2055,7 +2170,7 @@
 }
 
 bool DexFileVerifier::CheckInterStringIdItem() {
-  const DexFile::StringId* item = reinterpret_cast<const DexFile::StringId*>(ptr_);
+  const dex::StringId* item = reinterpret_cast<const dex::StringId*>(ptr_);
 
   // Check the map to make sure it has the right offset->type.
   if (!CheckOffsetToTypeMap(item->string_data_off_, DexFile::kDexTypeStringDataItem)) {
@@ -2064,7 +2179,7 @@
 
   // Check ordering between items.
   if (previous_item_ != nullptr) {
-    const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
+    const dex::StringId* prev_item = reinterpret_cast<const dex::StringId*>(previous_item_);
     const char* prev_str = dex_file_->GetStringData(*prev_item);
     const char* str = dex_file_->GetStringData(*item);
     if (UNLIKELY(CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(prev_str, str) >= 0)) {
@@ -2073,12 +2188,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::StringId);
+  ptr_ += sizeof(dex::StringId);
   return true;
 }
 
 bool DexFileVerifier::CheckInterTypeIdItem() {
-  const DexFile::TypeId* item = reinterpret_cast<const DexFile::TypeId*>(ptr_);
+  const dex::TypeId* item = reinterpret_cast<const dex::TypeId*>(ptr_);
 
   LOAD_STRING(descriptor, item->descriptor_idx_, "inter_type_id_item descriptor_idx")
 
@@ -2090,7 +2205,7 @@
 
   // Check ordering between items.
   if (previous_item_ != nullptr) {
-    const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
+    const dex::TypeId* prev_item = reinterpret_cast<const dex::TypeId*>(previous_item_);
     if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
       ErrorStringPrintf("Out-of-order type_ids: %x then %x",
                         prev_item->descriptor_idx_.index_,
@@ -2099,12 +2214,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::TypeId);
+  ptr_ += sizeof(dex::TypeId);
   return true;
 }
 
 bool DexFileVerifier::CheckInterProtoIdItem() {
-  const DexFile::ProtoId* item = reinterpret_cast<const DexFile::ProtoId*>(ptr_);
+  const dex::ProtoId* item = reinterpret_cast<const dex::ProtoId*>(ptr_);
 
   LOAD_STRING(shorty, item->shorty_idx_, "inter_proto_id_item shorty_idx")
 
@@ -2147,7 +2262,7 @@
 
   // Check ordering between items. This relies on type_ids being in order.
   if (previous_item_ != nullptr) {
-    const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
+    const dex::ProtoId* prev = reinterpret_cast<const dex::ProtoId*>(previous_item_);
     if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
       ErrorStringPrintf("Out-of-order proto_id return types");
       return false;
@@ -2180,12 +2295,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::ProtoId);
+  ptr_ += sizeof(dex::ProtoId);
   return true;
 }
 
 bool DexFileVerifier::CheckInterFieldIdItem() {
-  const DexFile::FieldId* item = reinterpret_cast<const DexFile::FieldId*>(ptr_);
+  const dex::FieldId* item = reinterpret_cast<const dex::FieldId*>(ptr_);
 
   // Check that the class descriptor is valid.
   LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_field_id_item class_idx")
@@ -2210,7 +2325,7 @@
 
   // Check ordering between items. This relies on the other sections being in order.
   if (previous_item_ != nullptr) {
-    const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
+    const dex::FieldId* prev_item = reinterpret_cast<const dex::FieldId*>(previous_item_);
     if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
       ErrorStringPrintf("Out-of-order field_ids");
       return false;
@@ -2227,12 +2342,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::FieldId);
+  ptr_ += sizeof(dex::FieldId);
   return true;
 }
 
 bool DexFileVerifier::CheckInterMethodIdItem() {
-  const DexFile::MethodId* item = reinterpret_cast<const DexFile::MethodId*>(ptr_);
+  const dex::MethodId* item = reinterpret_cast<const dex::MethodId*>(ptr_);
 
   // Check that the class descriptor is a valid reference name.
   LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_method_id_item class_idx")
@@ -2257,7 +2372,7 @@
 
   // Check ordering between items. This relies on the other sections being in order.
   if (previous_item_ != nullptr) {
-    const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
+    const dex::MethodId* prev_item = reinterpret_cast<const dex::MethodId*>(previous_item_);
     if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
       ErrorStringPrintf("Out-of-order method_ids");
       return false;
@@ -2274,12 +2389,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::MethodId);
+  ptr_ += sizeof(dex::MethodId);
   return true;
 }
 
 bool DexFileVerifier::CheckInterClassDefItem() {
-  const DexFile::ClassDef* item = reinterpret_cast<const DexFile::ClassDef*>(ptr_);
+  const dex::ClassDef* item = reinterpret_cast<const dex::ClassDef*>(ptr_);
 
   // Check that class_idx_ is representable as a uint16_t;
   if (UNLIKELY(!IsValidTypeId(item->class_idx_.index_, item->pad1_))) {
@@ -2341,7 +2456,7 @@
 
       // Check that a class is defined after its super class (if the
       // latter is defined in the same Dex file).
-      const DexFile::ClassDef* superclass_def = dex_file_->FindClassDef(item->superclass_idx_);
+      const dex::ClassDef* superclass_def = dex_file_->FindClassDef(item->superclass_idx_);
       if (superclass_def != nullptr) {
         // The superclass is defined in this Dex file.
         if (superclass_def > item) {
@@ -2365,7 +2480,7 @@
   }
 
   // Check interfaces.
-  const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
+  const dex::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
   if (interfaces != nullptr) {
     uint32_t size = interfaces->Size();
     for (uint32_t i = 0; i < size; i++) {
@@ -2380,7 +2495,7 @@
 
         // Check that a class is defined after the interfaces it implements
         // (if they are defined in the same Dex file).
-        const DexFile::ClassDef* interface_def =
+        const dex::ClassDef* interface_def =
             dex_file_->FindClassDef(interfaces->GetTypeItem(i).type_idx_);
         if (interface_def != nullptr) {
           // The interface is defined in this Dex file.
@@ -2456,12 +2571,12 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::ClassDef);
+  ptr_ += sizeof(dex::ClassDef);
   return true;
 }
 
 bool DexFileVerifier::CheckInterCallSiteIdItem() {
-  const DexFile::CallSiteIdItem* item = reinterpret_cast<const DexFile::CallSiteIdItem*>(ptr_);
+  const dex::CallSiteIdItem* item = reinterpret_cast<const dex::CallSiteIdItem*>(ptr_);
 
   // Check call site referenced by item is in encoded array section.
   if (!CheckOffsetToTypeMap(item->data_off_, DexFile::kDexTypeEncodedArrayItem)) {
@@ -2511,12 +2626,12 @@
     return false;
   }
 
-  ptr_ += sizeof(DexFile::CallSiteIdItem);
+  ptr_ += sizeof(dex::CallSiteIdItem);
   return true;
 }
 
 bool DexFileVerifier::CheckInterMethodHandleItem() {
-  const DexFile::MethodHandleItem* item = reinterpret_cast<const DexFile::MethodHandleItem*>(ptr_);
+  const dex::MethodHandleItem* item = reinterpret_cast<const dex::MethodHandleItem*>(ptr_);
 
   DexFile::MethodHandleType method_handle_type =
       static_cast<DexFile::MethodHandleType>(item->method_handle_type_);
@@ -2544,17 +2659,16 @@
     }
   }
 
-  ptr_ += sizeof(DexFile::MethodHandleItem);
+  ptr_ += sizeof(dex::MethodHandleItem);
   return true;
 }
 
 bool DexFileVerifier::CheckInterAnnotationSetRefList() {
-  const DexFile::AnnotationSetRefList* list =
-      reinterpret_cast<const DexFile::AnnotationSetRefList*>(ptr_);
-  const DexFile::AnnotationSetRefItem* item = list->list_;
+  const dex::AnnotationSetRefList* list = reinterpret_cast<const dex::AnnotationSetRefList*>(ptr_);
+  const dex::AnnotationSetRefItem* item = list->list_;
   uint32_t count = list->size_;
 
-  while (count--) {
+  for (; count != 0u; --count) {
     if (item->annotations_off_ != 0 &&
         !CheckOffsetToTypeMap(item->annotations_off_, DexFile::kDexTypeAnnotationSetItem)) {
       return false;
@@ -2567,7 +2681,7 @@
 }
 
 bool DexFileVerifier::CheckInterAnnotationSetItem() {
-  const DexFile::AnnotationSetItem* set = reinterpret_cast<const DexFile::AnnotationSetItem*>(ptr_);
+  const dex::AnnotationSetItem* set = reinterpret_cast<const dex::AnnotationSetItem*>(ptr_);
   const uint32_t* offsets = set->entries_;
   uint32_t count = set->size_;
   uint32_t last_idx = 0;
@@ -2578,8 +2692,8 @@
     }
 
     // Get the annotation from the offset and the type index for the annotation.
-    const DexFile::AnnotationItem* annotation =
-        reinterpret_cast<const DexFile::AnnotationItem*>(begin_ + *offsets);
+    const dex::AnnotationItem* annotation =
+        reinterpret_cast<const dex::AnnotationItem*>(begin_ + *offsets);
     const uint8_t* data = annotation->annotation_;
     DECODE_UNSIGNED_CHECKED_FROM(data, idx);
 
@@ -2630,8 +2744,8 @@
 }
 
 bool DexFileVerifier::CheckInterAnnotationsDirectoryItem() {
-  const DexFile::AnnotationsDirectoryItem* item =
-      reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
+  const dex::AnnotationsDirectoryItem* item =
+      reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr_);
   bool success;
   dex::TypeIndex defining_class = FindFirstAnnotationsDirectoryDefiner(ptr_, &success);
   if (!success) {
@@ -2644,8 +2758,8 @@
   }
 
   // Field annotations follow immediately after the annotations directory.
-  const DexFile::FieldAnnotationsItem* field_item =
-      reinterpret_cast<const DexFile::FieldAnnotationsItem*>(item + 1);
+  const dex::FieldAnnotationsItem* field_item =
+      reinterpret_cast<const dex::FieldAnnotationsItem*>(item + 1);
   uint32_t field_count = item->fields_size_;
   for (uint32_t i = 0; i < field_count; i++) {
     LOAD_FIELD(field, field_item->field_idx_, "inter_annotations_directory_item field_id",
@@ -2661,8 +2775,8 @@
   }
 
   // Method annotations follow immediately after field annotations.
-  const DexFile::MethodAnnotationsItem* method_item =
-      reinterpret_cast<const DexFile::MethodAnnotationsItem*>(field_item);
+  const dex::MethodAnnotationsItem* method_item =
+      reinterpret_cast<const dex::MethodAnnotationsItem*>(field_item);
   uint32_t method_count = item->methods_size_;
   for (uint32_t i = 0; i < method_count; i++) {
     LOAD_METHOD(method, method_item->method_idx_, "inter_annotations_directory_item method_id",
@@ -2678,8 +2792,8 @@
   }
 
   // Parameter annotations follow immediately after method annotations.
-  const DexFile::ParameterAnnotationsItem* parameter_item =
-      reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
+  const dex::ParameterAnnotationsItem* parameter_item =
+      reinterpret_cast<const dex::ParameterAnnotationsItem*>(method_item);
   uint32_t parameter_count = item->parameters_size_;
   for (uint32_t i = 0; i < parameter_count; i++) {
     LOAD_METHOD(parameter_method, parameter_item->method_idx_,
@@ -2735,6 +2849,7 @@
       case DexFile::kDexTypeDebugInfoItem:
       case DexFile::kDexTypeAnnotationItem:
       case DexFile::kDexTypeEncodedArrayItem:
+      case DexFile::kDexTypeHiddenapiClassData:
         break;
       case DexFile::kDexTypeStringIdItem: {
         if (!CheckInterStringIdItem()) {
@@ -2834,12 +2949,12 @@
 }
 
 bool DexFileVerifier::CheckInterSection() {
-  const DexFile::MapList* map = reinterpret_cast<const DexFile::MapList*>(begin_ + header_->map_off_);
-  const DexFile::MapItem* item = map->list_;
+  const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
+  const dex::MapItem* item = map->list_;
   uint32_t count = map->size_;
 
   // Cross check the items listed in the map.
-  while (count--) {
+  for (; count != 0u; --count) {
     uint32_t section_offset = item->offset_;
     uint32_t section_count = item->size_;
     DexFile::MapItemType type = static_cast<DexFile::MapItemType>(item->type_);
@@ -2867,7 +2982,8 @@
       case DexFile::kDexTypeAnnotationSetRefList:
       case DexFile::kDexTypeAnnotationSetItem:
       case DexFile::kDexTypeClassDataItem:
-      case DexFile::kDexTypeAnnotationsDirectoryItem: {
+      case DexFile::kDexTypeAnnotationsDirectoryItem:
+      case DexFile::kDexTypeHiddenapiClassData: {
         if (!CheckInterSectionIterate(section_offset, section_count, type)) {
           return false;
         }
@@ -2943,9 +3059,8 @@
     return "(error)";
   }
 
-  const DexFile::StringId* string_id =
-      reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_)
-          + string_idx.index_;
+  const dex::StringId* string_id =
+      reinterpret_cast<const dex::StringId*>(begin + header->string_ids_off_) + string_idx.index_;
 
   // Assume that the data is OK at this point. String data has been checked at this point.
 
@@ -2966,8 +3081,8 @@
   // a valid defining class.
   CHECK_LT(class_idx.index_, header->type_ids_size_);
 
-  const DexFile::TypeId* type_id =
-      reinterpret_cast<const DexFile::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
+  const dex::TypeId* type_id =
+      reinterpret_cast<const dex::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
 
   // Assume that the data is OK at this point. Type id offsets have been checked at this point.
 
@@ -2980,8 +3095,8 @@
   // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemField()`.
   CHECK_LT(idx, header->field_ids_size_);
 
-  const DexFile::FieldId* field_id =
-      reinterpret_cast<const DexFile::FieldId*>(begin + header->field_ids_off_) + idx;
+  const dex::FieldId* field_id =
+      reinterpret_cast<const dex::FieldId*>(begin + header->field_ids_off_) + idx;
 
   // Assume that the data is OK at this point. Field id offsets have been checked at this point.
 
@@ -2997,8 +3112,8 @@
   // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemMethod()`.
   CHECK_LT(idx, header->method_ids_size_);
 
-  const DexFile::MethodId* method_id =
-      reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) + idx;
+  const dex::MethodId* method_id =
+      reinterpret_cast<const dex::MethodId*>(begin + header->method_ids_off_) + idx;
 
   // Assume that the data is OK at this point. Method id offsets have been checked at this point.
 
@@ -3089,16 +3204,16 @@
 
 void DexFileVerifier::FindStringRangesForMethodNames() {
   // Use DexFile::StringId* as RandomAccessIterator.
-  const DexFile::StringId* first = reinterpret_cast<const DexFile::StringId*>(
+  const dex::StringId* first = reinterpret_cast<const dex::StringId*>(
       begin_ + header_->string_ids_off_);
-  const DexFile::StringId* last = first + header_->string_ids_size_;
+  const dex::StringId* last = first + header_->string_ids_size_;
 
-  auto get_string = [begin = begin_](const DexFile::StringId& id) {
+  auto get_string = [begin = begin_](const dex::StringId& id) {
     const uint8_t* str_data_ptr = begin + id.string_data_off_;
     DecodeUnsignedLeb128(&str_data_ptr);
     return reinterpret_cast<const char*>(str_data_ptr);
   };
-  auto compare = [&get_string](const DexFile::StringId& lhs, const char* rhs) {
+  auto compare = [&get_string](const dex::StringId& lhs, const char* rhs) {
     return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(get_string(lhs), rhs) < 0;
   };
 
@@ -3338,8 +3453,8 @@
          constructor_flags == (kAccConstructor | kAccStatic));
 
   // Check signature matches expectations.
-  const DexFile::MethodId* const method_id = CheckLoadMethodId(method_index,
-                                                               "Bad <init>/<clinit> method id");
+  const dex::MethodId* const method_id = CheckLoadMethodId(method_index,
+                                                           "Bad <init>/<clinit> method id");
   if (method_id == nullptr) {
     return false;
   }
@@ -3349,8 +3464,8 @@
   // TODO(oth): the error message here is to satisfy the MethodId test
   // in the DexFileVerifierTest. The test is checking that the error
   // contains this string if the index is out of range.
-  const DexFile::ProtoId* const proto_id = CheckLoadProtoId(method_id->proto_idx_,
-                                                            "inter_method_id_item proto_idx");
+  const dex::ProtoId* const proto_id = CheckLoadProtoId(method_id->proto_idx_,
+                                                        "inter_method_id_item proto_idx");
   if (proto_id == nullptr) {
     return false;
   }
diff --git a/libdexfile/dex/dex_file_verifier.h b/libdexfile/dex/dex_file_verifier.h
index 79ddea4..b51a417 100644
--- a/libdexfile/dex/dex_file_verifier.h
+++ b/libdexfile/dex/dex_file_verifier.h
@@ -79,7 +79,7 @@
   bool CheckMap();
 
   uint32_t ReadUnsignedLittleEndian(uint32_t size);
-  bool CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
+  bool CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
                                  uint32_t* handler_offsets, uint32_t handlers_size);
   bool CheckClassDataItemField(uint32_t idx,
                                uint32_t access_flags,
@@ -95,7 +95,7 @@
                                 size_t* remaining_directs);
   ALWAYS_INLINE
   bool CheckOrder(const char* type_descr, uint32_t curr_index, uint32_t prev_index);
-  bool CheckStaticFieldTypes(const DexFile::ClassDef* class_def);
+  bool CheckStaticFieldTypes(const dex::ClassDef* class_def);
 
   bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
   bool CheckEncodedValue();
@@ -110,7 +110,7 @@
                                      ClassAccessor::Field* field,
                                      bool* have_class,
                                      dex::TypeIndex* class_type_index,
-                                     const DexFile::ClassDef** class_def);
+                                     const dex::ClassDef** class_def);
   // Check all methods of the given type from the given iterator. Load the class data from the first
   // method, if necessary (and return it), or use the given values.
   bool CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
@@ -119,13 +119,14 @@
                                       size_t num_directs,
                                       bool* have_class,
                                       dex::TypeIndex* class_type_index,
-                                      const DexFile::ClassDef** class_def);
+                                      const dex::ClassDef** class_def);
 
   bool CheckIntraCodeItem();
   bool CheckIntraStringDataItem();
   bool CheckIntraDebugInfoItem();
   bool CheckIntraAnnotationItem();
   bool CheckIntraAnnotationsDirectoryItem();
+  bool CheckIntraHiddenapiClassData();
 
   template <DexFile::MapItemType kType>
   bool CheckIntraSectionIterate(size_t offset, uint32_t count);
@@ -165,9 +166,9 @@
 
   // Load a field/method/proto Id by index. Checks whether the index is in bounds, printing the
   // error if not. If there is an error, null is returned.
-  const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
-  const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
-  const DexFile::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
+  const dex::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
+  const dex::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
+  const dex::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
 
   void ErrorStringPrintf(const char* fmt, ...)
       __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
@@ -181,7 +182,7 @@
   bool FindClassIndexAndDef(uint32_t index,
                             bool is_field,
                             dex::TypeIndex* class_type_index,
-                            const DexFile::ClassDef** output_class_def);
+                            const dex::ClassDef** output_class_def);
 
   // Check validity of the given access flags, interpreted for a field in the context of a class
   // with the given second access flags.
@@ -246,7 +247,7 @@
   std::string failure_reason_;
 
   // Set of type ids for which there are ClassDef elements in the dex file.
-  std::unordered_set<decltype(DexFile::ClassDef::class_idx_)> defined_classes_;
+  std::unordered_set<decltype(dex::ClassDef::class_idx_)> defined_classes_;
 
   // Cached string indices for "interesting" entries wrt/ method names. Will be populated by
   // FindStringRangesForMethodNames (which is automatically called before verifying the
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index a22a457..b2cff4f 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -107,8 +107,8 @@
   bool success = dex_file_loader.OpenAll(dex_bytes.get(),
                                          length,
                                          location,
-                                         /* verify */ true,
-                                         /* verify_checksum */ true,
+                                         /* verify= */ true,
+                                         /* verify_checksum= */ true,
                                          &error_code,
                                          error_msg,
                                          &tmp);
@@ -153,7 +153,7 @@
       kGoodTestDex,
       "method_id_class_idx",
       [](DexFile* dex_file) {
-        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->class_idx_ = dex::TypeIndex(0xFF);
       },
       "could not find declaring class for direct method index 0");
@@ -163,7 +163,7 @@
       kGoodTestDex,
       "method_id_proto_idx",
       [](DexFile* dex_file) {
-        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->proto_idx_ = dex::ProtoIndex(0xFF);
       },
       "inter_method_id_item proto_idx");
@@ -173,7 +173,7 @@
       kGoodTestDex,
       "method_id_name_idx",
       [](DexFile* dex_file) {
-        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->name_idx_ = dex::StringIndex(0xFF);
       },
       "Bad index for method flags verification");
@@ -244,7 +244,7 @@
   for (const ClassAccessor::Method& method : accessor.GetMethods()) {
     uint32_t method_index = method.GetIndex();
     dex::StringIndex name_index = dex_file->GetMethodId(method_index).name_idx_;
-    const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+    const dex::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
     if (strcmp(name, str) == 0) {
       if (method_idx != nullptr) {
@@ -837,7 +837,7 @@
   for (const ClassAccessor::Field& field : accessor.GetFields()) {
     uint32_t field_index = field.GetIndex();
     dex::StringIndex name_index = dex_file->GetFieldId(field_index).name_idx_;
-    const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+    const dex::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
     if (strcmp(name, str) == 0) {
       // Go to the back of the access flags.
@@ -1415,9 +1415,9 @@
                    dex_file->GetMethodId(method_idx + 1).proto_idx_.index_);
           // Their return types should be the same.
           dex::ProtoIndex proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_;
-          const DexFile::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
+          const dex::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
           dex::ProtoIndex proto2_idx(proto1_idx.index_ + 1u);
-          const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto2_idx);
+          const dex::ProtoId& proto2 = dex_file->GetProtoId(proto2_idx);
           CHECK_EQ(proto1.return_type_idx_, proto2.return_type_idx_);
           // And the first should not have any parameters while the second should have some.
           CHECK(!DexFileParameterIterator(*dex_file, proto1).HasNext());
@@ -1621,13 +1621,13 @@
                                       dex_file->Begin(),
                                       dex_file->Size(),
                                        "good checksum, no verify",
-                                      /*verify_checksum*/ false,
+                                      /*verify_checksum=*/ false,
                                       &error_msg));
   EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
                                       dex_file->Begin(),
                                       dex_file->Size(),
                                       "good checksum, verify",
-                                      /*verify_checksum*/ true,
+                                      /*verify_checksum=*/ true,
                                       &error_msg));
 
   // Bad checksum: !verify_checksum passes verify_checksum fails.
@@ -1638,13 +1638,13 @@
                                       dex_file->Begin(),
                                       dex_file->Size(),
                                       "bad checksum, no verify",
-                                      /*verify_checksum*/ false,
+                                      /*verify_checksum=*/ false,
                                       &error_msg));
   EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad checksum, verify",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
   EXPECT_NE(error_msg.find("Bad checksum"), std::string::npos) << error_msg;
 }
@@ -1691,7 +1691,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad static method name",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -1735,7 +1735,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad virtual method name",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -1779,7 +1779,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad clinit signature",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -1823,7 +1823,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad clinit signature",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -1860,7 +1860,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad init signature",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -2063,7 +2063,7 @@
                                         dex_file->Begin(),
                                         dex_file->Size(),
                                         "good checksum, verify",
-                                        /*verify_checksum*/ true,
+                                        /*verify_checksum=*/ true,
                                         &error_msg));
     // TODO(oth): Test corruptions (b/35308502)
   }
@@ -2110,7 +2110,7 @@
                                        dex_file->Begin(),
                                        dex_file->Size(),
                                        "bad static field initial values array",
-                                       /*verify_checksum*/ true,
+                                       /*verify_checksum=*/ true,
                                        &error_msg));
 }
 
@@ -2166,7 +2166,7 @@
                                       dex_file->Begin(),
                                       dex_file->Size(),
                                       "good static field initial values array",
-                                      /*verify_checksum*/ true,
+                                      /*verify_checksum=*/ true,
                                       &error_msg));
 }
 
diff --git a/libdexfile/dex/dex_instruction.cc b/libdexfile/dex/dex_instruction.cc
index 8378211..f36a2aa 100644
--- a/libdexfile/dex/dex_instruction.cc
+++ b/libdexfile/dex/dex_instruction.cc
@@ -85,7 +85,7 @@
     default: LOG(FATAL) << "Tried to access the branch offset of an instruction " << Name() <<
         " which does not have a target operand.";
   }
-  return 0;
+  UNREACHABLE();
 }
 
 bool Instruction::CanFlowThrough() const {
@@ -402,9 +402,9 @@
         case INVOKE_VIRTUAL_QUICK:
           if (file != nullptr) {
             os << opcode << " {";
-            uint32_t method_idx = VRegB_35c();
+            uint32_t vtable_offset = VRegB_35c();
             DumpArgs(VRegA_35c());
-            os << "},  // vtable@" << method_idx;
+            os << "},  // vtable@" << vtable_offset;
             break;
           }
           FALLTHROUGH_INTENDED;
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index ad8a184..4b38904 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -173,7 +173,7 @@
   };
 
   enum VerifyFlag : uint32_t {
-    kVerifyNone               = 0x0000000,
+    kVerifyNothing            = 0x0000000,
     kVerifyRegA               = 0x0000001,
     kVerifyRegAWide           = 0x0000002,
     kVerifyRegB               = 0x0000004,
diff --git a/libdexfile/dex/dex_instruction_iterator.h b/libdexfile/dex/dex_instruction_iterator.h
index b75a95b..6c7f42a 100644
--- a/libdexfile/dex/dex_instruction_iterator.h
+++ b/libdexfile/dex/dex_instruction_iterator.h
@@ -63,7 +63,6 @@
   using value_type = std::iterator<std::forward_iterator_tag, DexInstructionPcPair>::value_type;
   using difference_type = std::iterator<std::forward_iterator_tag, value_type>::difference_type;
 
-  DexInstructionIteratorBase() = default;
   explicit DexInstructionIteratorBase(const Instruction* inst, uint32_t dex_pc)
       : data_(reinterpret_cast<const uint16_t*>(inst), dex_pc) {}
 
diff --git a/libdexfile/dex/dex_instruction_list.h b/libdexfile/dex/dex_instruction_list.h
index 9f0aba4..b9540a6 100644
--- a/libdexfile/dex/dex_instruction_list.h
+++ b/libdexfile/dex/dex_instruction_list.h
@@ -19,7 +19,7 @@
 
 // V(opcode, instruction_code, name, format, index, flags, extended_flags, verifier_flags);
 #define DEX_INSTRUCTION_LIST(V) \
-  V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, 0, kVerifyNone) \
+  V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, 0, kVerifyNothing) \
   V(0x01, MOVE, "move", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
   V(0x02, MOVE_FROM16, "move/from16", k22x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
   V(0x03, MOVE_16, "move/16", k32x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
@@ -33,7 +33,7 @@
   V(0x0B, MOVE_RESULT_WIDE, "move-result-wide", k11x, kIndexNone, kContinue, 0, kVerifyRegAWide) \
   V(0x0C, MOVE_RESULT_OBJECT, "move-result-object", k11x, kIndexNone, kContinue, 0, kVerifyRegA) \
   V(0x0D, MOVE_EXCEPTION, "move-exception", k11x, kIndexNone, kContinue, 0, kVerifyRegA) \
-  V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, 0, kVerifyNone) \
+  V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, 0, kVerifyNothing) \
   V(0x0F, RETURN, "return", k11x, kIndexNone, kReturn, 0, kVerifyRegA) \
   V(0x10, RETURN_WIDE, "return-wide", k11x, kIndexNone, kReturn, 0, kVerifyRegAWide) \
   V(0x11, RETURN_OBJECT, "return-object", k11x, kIndexNone, kReturn, 0, kVerifyRegA) \
@@ -134,7 +134,7 @@
   V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
   V(0x71, INVOKE_STATIC, "invoke-static", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArg) \
   V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
-  V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, kIndexNone, kReturn, 0, kVerifyNone) \
+  V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, kIndexNone, kReturn, 0, kVerifyNothing) \
   V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
   V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
   V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
diff --git a/libdexfile/dex/dex_instruction_test.cc b/libdexfile/dex/dex_instruction_test.cc
index 6ce9dba..3f79abf 100644
--- a/libdexfile/dex/dex_instruction_test.cc
+++ b/libdexfile/dex/dex_instruction_test.cc
@@ -26,7 +26,7 @@
   EXPECT_EQ(Instruction::k10x, Instruction::FormatOf(nop));
   EXPECT_EQ(Instruction::kIndexNone, Instruction::IndexTypeOf(nop));
   EXPECT_EQ(Instruction::kContinue, Instruction::FlagsOf(nop));
-  EXPECT_EQ(Instruction::kVerifyNone, Instruction::VerifyFlagsOf(nop));
+  EXPECT_EQ(Instruction::kVerifyNothing, Instruction::VerifyFlagsOf(nop));
 }
 
 static void Build45cc(uint8_t num_args, uint16_t method_idx, uint16_t proto_idx,
@@ -71,10 +71,13 @@
 
 TEST(Instruction, PropertiesOf45cc) {
   uint16_t instruction[4];
-  Build45cc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
-            0xcafe /* arg_regs */, instruction);
+  Build45cc(/* num_args= */ 4u,
+            /* method_idx= */ 16u,
+            /* proto_idx= */ 32u,
+            /* arg_regs= */ 0xcafe,
+            instruction);
 
-  DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+  DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
   ASSERT_EQ(4u, ins->SizeInCodeUnits());
 
   ASSERT_TRUE(ins->HasVRegA());
@@ -106,10 +109,13 @@
 
 TEST(Instruction, PropertiesOf4rcc) {
   uint16_t instruction[4];
-  Build4rcc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
-            0xcafe /* arg_regs */, instruction);
+  Build4rcc(/* num_args= */ 4u,
+            /* method_idx= */ 16u,
+            /* proto_idx= */ 32u,
+            /* arg_regs_start= */ 0xcafe,
+            instruction);
 
-  DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+  DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
   ASSERT_EQ(4u, ins->SizeInCodeUnits());
 
   ASSERT_TRUE(ins->HasVRegA());
diff --git a/libdexfile/dex/hidden_api_access_flags.h b/libdexfile/dex/hidden_api_access_flags.h
deleted file mode 100644
index 1aaeabd..0000000
--- a/libdexfile/dex/hidden_api_access_flags.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
-#define ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
-
-#include "base/bit_utils.h"
-#include "base/macros.h"
-#include "dex/modifiers.h"
-
-namespace art {
-
-/* This class is used for encoding and decoding access flags of class members
- * from the boot class path. These access flags might contain additional two bits
- * of information on whether the given class member should be hidden from apps
- * and under what circumstances.
- *
- * The encoding is different inside DexFile, where we are concerned with size,
- * and at runtime where we want to optimize for speed of access. The class
- * provides helper functions to decode/encode both of them.
- *
- * Encoding in DexFile
- * ===================
- *
- * First bit is encoded as inversion of visibility flags (public/private/protected).
- * At most one can be set for any given class member. If two or three are set,
- * this is interpreted as the first bit being set and actual visibility flags
- * being the complement of the encoded flags.
- *
- * Second bit is either encoded as bit 5 for fields and non-native methods, where
- * it carries no other meaning. If a method is native (bit 8 set), bit 9 is used.
- *
- * Bits were selected so that they never increase the length of unsigned LEB-128
- * encoding of the access flags.
- *
- * Encoding at runtime
- * ===================
- *
- * Two bits are set aside in the uint32_t access flags in the intrinsics ordinal
- * space (thus intrinsics need to be special-cased). These are two consecutive
- * bits and they are directly used to store the integer value of the ApiList
- * enum values.
- *
- */
-class HiddenApiAccessFlags {
- public:
-  enum ApiList {
-    kWhitelist = 0,
-    kLightGreylist,
-    kDarkGreylist,
-    kBlacklist,
-  };
-
-  static ALWAYS_INLINE ApiList DecodeFromDex(uint32_t dex_access_flags) {
-    DexHiddenAccessFlags flags(dex_access_flags);
-    uint32_t int_value = (flags.IsFirstBitSet() ? 1 : 0) + (flags.IsSecondBitSet() ? 2 : 0);
-    return static_cast<ApiList>(int_value);
-  }
-
-  static ALWAYS_INLINE uint32_t RemoveFromDex(uint32_t dex_access_flags) {
-    DexHiddenAccessFlags flags(dex_access_flags);
-    flags.SetFirstBit(false);
-    flags.SetSecondBit(false);
-    return flags.GetEncoding();
-  }
-
-  static ALWAYS_INLINE uint32_t EncodeForDex(uint32_t dex_access_flags, ApiList value) {
-    DexHiddenAccessFlags flags(RemoveFromDex(dex_access_flags));
-    uint32_t int_value = static_cast<uint32_t>(value);
-    flags.SetFirstBit((int_value & 1) != 0);
-    flags.SetSecondBit((int_value & 2) != 0);
-    return flags.GetEncoding();
-  }
-
-  static ALWAYS_INLINE ApiList DecodeFromRuntime(uint32_t runtime_access_flags) {
-    // This is used in the fast path, only DCHECK here.
-    DCHECK_EQ(runtime_access_flags & kAccIntrinsic, 0u);
-    uint32_t int_value = (runtime_access_flags & kAccHiddenApiBits) >> kAccFlagsShift;
-    return static_cast<ApiList>(int_value);
-  }
-
-  static ALWAYS_INLINE uint32_t EncodeForRuntime(uint32_t runtime_access_flags, ApiList value) {
-    CHECK_EQ(runtime_access_flags & kAccIntrinsic, 0u);
-
-    uint32_t hidden_api_flags = static_cast<uint32_t>(value) << kAccFlagsShift;
-    CHECK_EQ(hidden_api_flags & ~kAccHiddenApiBits, 0u);
-
-    runtime_access_flags &= ~kAccHiddenApiBits;
-    return runtime_access_flags | hidden_api_flags;
-  }
-
- private:
-  static const int kAccFlagsShift = CTZ(kAccHiddenApiBits);
-  static_assert(IsPowerOfTwo((kAccHiddenApiBits >> kAccFlagsShift) + 1),
-                "kAccHiddenApiBits are not continuous");
-
-  struct DexHiddenAccessFlags {
-    explicit DexHiddenAccessFlags(uint32_t access_flags) : access_flags_(access_flags) {}
-
-    ALWAYS_INLINE uint32_t GetSecondFlag() {
-      return ((access_flags_ & kAccNative) != 0) ? kAccDexHiddenBitNative : kAccDexHiddenBit;
-    }
-
-    ALWAYS_INLINE bool IsFirstBitSet() {
-      static_assert(IsPowerOfTwo(0u), "Following statement checks if *at most* one bit is set");
-      return !IsPowerOfTwo(access_flags_ & kAccVisibilityFlags);
-    }
-
-    ALWAYS_INLINE void SetFirstBit(bool value) {
-      if (IsFirstBitSet() != value) {
-        access_flags_ ^= kAccVisibilityFlags;
-      }
-    }
-
-    ALWAYS_INLINE bool IsSecondBitSet() {
-      return (access_flags_ & GetSecondFlag()) != 0;
-    }
-
-    ALWAYS_INLINE void SetSecondBit(bool value) {
-      if (value) {
-        access_flags_ |= GetSecondFlag();
-      } else {
-        access_flags_ &= ~GetSecondFlag();
-      }
-    }
-
-    ALWAYS_INLINE uint32_t GetEncoding() const {
-      return access_flags_;
-    }
-
-    uint32_t access_flags_;
-  };
-};
-
-inline std::ostream& operator<<(std::ostream& os, HiddenApiAccessFlags::ApiList value) {
-  switch (value) {
-    case HiddenApiAccessFlags::kWhitelist:
-      os << "whitelist";
-      break;
-    case HiddenApiAccessFlags::kLightGreylist:
-      os << "light greylist";
-      break;
-    case HiddenApiAccessFlags::kDarkGreylist:
-      os << "dark greylist";
-      break;
-    case HiddenApiAccessFlags::kBlacklist:
-      os << "blacklist";
-      break;
-  }
-  return os;
-}
-
-}  // namespace art
-
-
-#endif  // ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
diff --git a/libdexfile/dex/method_reference.h b/libdexfile/dex/method_reference.h
index 266582b..f66ac30 100644
--- a/libdexfile/dex/method_reference.h
+++ b/libdexfile/dex/method_reference.h
@@ -31,7 +31,7 @@
   std::string PrettyMethod(bool with_signature = true) const {
     return dex_file->PrettyMethod(index, with_signature);
   }
-  const DexFile::MethodId& GetMethodId() const {
+  const dex::MethodId& GetMethodId() const {
     return dex_file->GetMethodId(index);
   }
 };
@@ -50,8 +50,8 @@
   bool SlowCompare(MethodReference mr1, MethodReference mr2) const {
     // The order is the same as for method ids in a single dex file.
     // Compare the class descriptors first.
-    const DexFile::MethodId& mid1 = mr1.GetMethodId();
-    const DexFile::MethodId& mid2 = mr2.GetMethodId();
+    const dex::MethodId& mid1 = mr1.GetMethodId();
+    const dex::MethodId& mid2 = mr2.GetMethodId();
     int descriptor_diff = strcmp(mr1.dex_file->StringByTypeIdx(mid1.class_idx_),
                                  mr2.dex_file->StringByTypeIdx(mid2.class_idx_));
     if (descriptor_diff != 0) {
@@ -63,17 +63,17 @@
       return name_diff < 0;
     }
     // And then compare proto ids, starting with return type comparison.
-    const DexFile::ProtoId& prid1 = mr1.dex_file->GetProtoId(mid1.proto_idx_);
-    const DexFile::ProtoId& prid2 = mr2.dex_file->GetProtoId(mid2.proto_idx_);
+    const dex::ProtoId& prid1 = mr1.dex_file->GetProtoId(mid1.proto_idx_);
+    const dex::ProtoId& prid2 = mr2.dex_file->GetProtoId(mid2.proto_idx_);
     int return_type_diff = strcmp(mr1.dex_file->StringByTypeIdx(prid1.return_type_idx_),
                                   mr2.dex_file->StringByTypeIdx(prid2.return_type_idx_));
     if (return_type_diff != 0) {
       return return_type_diff < 0;
     }
     // And finishing with lexicographical parameter comparison.
-    const DexFile::TypeList* params1 = mr1.dex_file->GetProtoParameters(prid1);
+    const dex::TypeList* params1 = mr1.dex_file->GetProtoParameters(prid1);
     size_t param1_size = (params1 != nullptr) ? params1->Size() : 0u;
-    const DexFile::TypeList* params2 = mr2.dex_file->GetProtoParameters(prid2);
+    const dex::TypeList* params2 = mr2.dex_file->GetProtoParameters(prid2);
     size_t param2_size = (params2 != nullptr) ? params2->Size() : 0u;
     for (size_t i = 0, num = std::min(param1_size, param2_size); i != num; ++i) {
       int param_diff = strcmp(mr1.dex_file->StringByTypeIdx(params1->GetTypeItem(i).type_idx_),
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index 38f8455..0c79c96 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -42,11 +42,6 @@
 
 static constexpr uint32_t kAccJavaFlagsMask = 0xffff;  // bits set from Java sources (low 16)
 
-// The following flags are used to insert hidden API access flags into boot class path dex files.
-// They are decoded by ClassAccessor and removed from the access flags before used by the runtime.
-static constexpr uint32_t kAccDexHiddenBit =          0x00000020;  // field, method (not native)
-static constexpr uint32_t kAccDexHiddenBitNative =    0x00000200;  // method (native)
-
 static constexpr uint32_t kAccConstructor =           0x00010000;  // method (dex only) <(cl)init>
 static constexpr uint32_t kAccDeclaredSynchronized =  0x00020000;  // method (dex only)
 static constexpr uint32_t kAccClassIsProxy =          0x00040000;  // class  (dex only)
@@ -57,7 +52,7 @@
 static constexpr uint32_t kAccSkipAccessChecks =      0x00080000;  // method (runtime, not native)
 // Used by a class to denote that the verifier has attempted to check it at least once.
 static constexpr uint32_t kAccVerificationAttempted = 0x00080000;  // class (runtime)
-static constexpr uint32_t kAccSkipHiddenApiChecks =   0x00100000;  // class (runtime)
+static constexpr uint32_t kAccSkipHiddenapiChecks =   0x00100000;  // class (runtime)
 // This is set by the class linker during LinkInterfaceMethods. It is used by a method to represent
 // that it was copied from its declaring class into another class. All methods marked kAccMiranda
 // and kAccDefaultConflict will have this bit set. Any kAccDefault method contained in the methods_
@@ -89,11 +84,12 @@
 // virtual call.
 static constexpr uint32_t kAccSingleImplementation =  0x08000000;  // method (runtime)
 
-static constexpr uint32_t kAccHiddenApiBits =         0x30000000;  // field, method
+static constexpr uint32_t kAccPublicApi =             0x10000000;  // field, method
+static constexpr uint32_t kAccCorePlatformApi =       0x20000000;  // field, method
 
-// Not currently used, except for intrinsic methods where these bits
-// are part of the intrinsic ordinal.
-static constexpr uint32_t kAccMayBeUnusedBits =       0x40000000;
+// Non-intrinsics: Caches whether we can use a fast path in interpreter invokes.
+// Intrinsics: These bits are part of the intrinsic ordinal.
+static constexpr uint32_t kAccFastInterpreterToInterpreterInvoke = 0x40000000;  // method.
 
 // Set by the compiler driver when compiling boot classes with instrinsic methods.
 static constexpr uint32_t kAccIntrinsic  =            0x80000000;  // method (runtime)
@@ -106,11 +102,13 @@
 // class/ancestor overrides finalize()
 static constexpr uint32_t kAccClassIsFinalizable        = 0x80000000;
 
+static constexpr uint32_t kAccHiddenapiBits = kAccPublicApi | kAccCorePlatformApi;
+
 // Continuous sequence of bits used to hold the ordinal of an intrinsic method. Flags
 // which overlap are not valid when kAccIntrinsic is set.
-static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccHiddenApiBits |
+static constexpr uint32_t kAccIntrinsicBits = kAccHiddenapiBits |
     kAccSingleImplementation | kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict |
-    kAccPreviouslyWarm;
+    kAccPreviouslyWarm | kAccFastInterpreterToInterpreterInvoke;
 
 // Valid (meaningful) bits for a field.
 static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
diff --git a/libdexfile/dex/signature-inl.h b/libdexfile/dex/signature-inl.h
new file mode 100644
index 0000000..ccc7ea9
--- /dev/null
+++ b/libdexfile/dex/signature-inl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
+#define ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
+
+#include "signature.h"
+
+#include "base/stringpiece.h"
+#include "dex_file-inl.h"
+
+namespace art {
+
+inline bool Signature::operator==(const Signature& rhs) const {
+  if (dex_file_ == nullptr) {
+    return rhs.dex_file_ == nullptr;
+  }
+  if (rhs.dex_file_ == nullptr) {
+    return false;
+  }
+  if (dex_file_ == rhs.dex_file_) {
+    return proto_id_ == rhs.proto_id_;
+  }
+  uint32_t lhs_shorty_len;  // For a shorty utf16 length == mutf8 length.
+  const char* lhs_shorty_data = dex_file_->StringDataAndUtf16LengthByIdx(proto_id_->shorty_idx_,
+                                                                         &lhs_shorty_len);
+  StringPiece lhs_shorty(lhs_shorty_data, lhs_shorty_len);
+  {
+    uint32_t rhs_shorty_len;
+    const char* rhs_shorty_data =
+        rhs.dex_file_->StringDataAndUtf16LengthByIdx(rhs.proto_id_->shorty_idx_,
+                                                     &rhs_shorty_len);
+    StringPiece rhs_shorty(rhs_shorty_data, rhs_shorty_len);
+    if (lhs_shorty != rhs_shorty) {
+      return false;  // Shorty mismatch.
+    }
+  }
+  if (lhs_shorty[0] == 'L') {
+    const dex::TypeId& return_type_id = dex_file_->GetTypeId(proto_id_->return_type_idx_);
+    const dex::TypeId& rhs_return_type_id =
+        rhs.dex_file_->GetTypeId(rhs.proto_id_->return_type_idx_);
+    if (!DexFile::StringEquals(dex_file_, return_type_id.descriptor_idx_,
+                               rhs.dex_file_, rhs_return_type_id.descriptor_idx_)) {
+      return false;  // Return type mismatch.
+    }
+  }
+  if (lhs_shorty.find('L', 1) != StringPiece::npos) {
+    const dex::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+    const dex::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
+    // We found a reference parameter in the matching shorty, so both lists must be non-empty.
+    DCHECK(params != nullptr);
+    DCHECK(rhs_params != nullptr);
+    uint32_t params_size = params->Size();
+    DCHECK_EQ(params_size, rhs_params->Size());  // Parameter list size must match.
+    for (uint32_t i = 0; i < params_size; ++i) {
+      const dex::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
+      const dex::TypeId& rhs_param_id =
+          rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
+      if (!DexFile::StringEquals(dex_file_, param_id.descriptor_idx_,
+                                 rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
+        return false;  // Parameter type mismatch.
+      }
+    }
+  }
+  return true;
+}
+
+}  // namespace art
+
+#endif  // ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
diff --git a/libdexfile/dex/signature.cc b/libdexfile/dex/signature.cc
new file mode 100644
index 0000000..34b4b55
--- /dev/null
+++ b/libdexfile/dex/signature.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "signature-inl.h"
+
+#include <string.h>
+
+#include <ostream>
+#include <type_traits>
+
+namespace art {
+
+using dex::TypeList;
+
+std::string Signature::ToString() const {
+  if (dex_file_ == nullptr) {
+    CHECK(proto_id_ == nullptr);
+    return "<no signature>";
+  }
+  const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+  std::string result;
+  if (params == nullptr) {
+    result += "()";
+  } else {
+    result += "(";
+    for (uint32_t i = 0; i < params->Size(); ++i) {
+      result += dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_);
+    }
+    result += ")";
+  }
+  result += dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
+  return result;
+}
+
+uint32_t Signature::GetNumberOfParameters() const {
+  const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+  return (params != nullptr) ? params->Size() : 0;
+}
+
+bool Signature::IsVoid() const {
+  const char* return_type = dex_file_->GetReturnTypeDescriptor(*proto_id_);
+  return strcmp(return_type, "V") == 0;
+}
+
+bool Signature::operator==(const StringPiece& rhs) const {
+  if (dex_file_ == nullptr) {
+    return false;
+  }
+  StringPiece tail(rhs);
+  if (!tail.starts_with("(")) {
+    return false;  // Invalid signature
+  }
+  tail.remove_prefix(1);  // "(";
+  const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+  if (params != nullptr) {
+    for (uint32_t i = 0; i < params->Size(); ++i) {
+      StringPiece param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_));
+      if (!tail.starts_with(param)) {
+        return false;
+      }
+      tail.remove_prefix(param.length());
+    }
+  }
+  if (!tail.starts_with(")")) {
+    return false;
+  }
+  tail.remove_prefix(1);  // ")";
+  return tail == dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
+}
+
+std::ostream& operator<<(std::ostream& os, const Signature& sig) {
+  return os << sig.ToString();
+}
+
+}  // namespace art
diff --git a/libdexfile/dex/signature.h b/libdexfile/dex/signature.h
new file mode 100644
index 0000000..235f37c
--- /dev/null
+++ b/libdexfile/dex/signature.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_SIGNATURE_H_
+#define ART_LIBDEXFILE_DEX_SIGNATURE_H_
+
+#include <iosfwd>
+#include <string>
+
+#include <android-base/logging.h>
+
+#include "base/value_object.h"
+
+namespace art {
+
+namespace dex {
+struct ProtoId;
+}  // namespace dex
+class DexFile;
+class StringPiece;
+
+// Abstract the signature of a method.
+class Signature : public ValueObject {
+ public:
+  std::string ToString() const;
+
+  static Signature NoSignature() {
+    return Signature();
+  }
+
+  bool IsVoid() const;
+  uint32_t GetNumberOfParameters() const;
+
+  bool operator==(const Signature& rhs) const;
+  bool operator!=(const Signature& rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator==(const StringPiece& rhs) const;
+
+ private:
+  Signature(const DexFile* dex, const dex::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
+  }
+
+  Signature() = default;
+
+  friend class DexFile;
+
+  const DexFile* const dex_file_ = nullptr;
+  const dex::ProtoId* const proto_id_ = nullptr;
+};
+std::ostream& operator<<(std::ostream& os, const Signature& sig);
+
+}  // namespace art
+
+#endif  // ART_LIBDEXFILE_DEX_SIGNATURE_H_
diff --git a/libdexfile/dex/standard_dex_file.cc b/libdexfile/dex/standard_dex_file.cc
index 40dcafd..8bac44e 100644
--- a/libdexfile/dex/standard_dex_file.cc
+++ b/libdexfile/dex/standard_dex_file.cc
@@ -72,7 +72,7 @@
   return GetDexVersion() >= DexFile::kDefaultMethodsVersion;
 }
 
-uint32_t StandardDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
+uint32_t StandardDexFile::GetCodeItemSize(const dex::CodeItem& item) const {
   DCHECK(IsInDataSection(&item));
   return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
       reinterpret_cast<uintptr_t>(&item);
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index fd7e78f..48671c9 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -32,7 +32,7 @@
     // Same for now.
   };
 
-  struct CodeItem : public DexFile::CodeItem {
+  struct CodeItem : public dex::CodeItem {
     static constexpr size_t kAlignment = 4;
 
    private:
@@ -81,10 +81,13 @@
 
   bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
+  uint32_t GetCodeItemSize(const dex::CodeItem& item) const override;
 
   size_t GetDequickenedSize() const override {
-    return Size();
+    // JVMTI will run dex layout on standard dex files that have hidden API data,
+    // in order to remove that data. As dexlayout may increase the size of the dex file,
+    // be (very) conservative and add one MB to the size.
+    return Size() + (HasHiddenapiClassData() ? 1 * MB : 0);
   }
 
  private:
diff --git a/libdexfile/dex/test_dex_file_builder.h b/libdexfile/dex/test_dex_file_builder.h
index 2d8a0bb..2b0bad0 100644
--- a/libdexfile/dex/test_dex_file_builder.h
+++ b/libdexfile/dex/test_dex_file_builder.h
@@ -26,6 +26,7 @@
 
 #include <android-base/logging.h>
 
+#include "base/bit_utils.h"
 #include "dex/dex_file_loader.h"
 #include "dex/standard_dex_file.h"
 
@@ -111,7 +112,7 @@
     header->string_ids_size_ = strings_.size();
     header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
 
-    uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+    uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(dex::StringId);
     uint32_t type_idx = 0u;
     for (auto& entry : types_) {
       entry.second = type_idx;
@@ -120,7 +121,7 @@
     header->type_ids_size_ = types_.size();
     header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
 
-    uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+    uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(dex::TypeId);
     uint32_t proto_idx = 0u;
     for (auto& entry : protos_) {
       entry.second.idx = proto_idx;
@@ -128,7 +129,7 @@
       size_t num_args = entry.first.args.size();
       if (num_args != 0u) {
         entry.second.data_offset = RoundUp(data_section_size, 4u);
-        data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+        data_section_size = entry.second.data_offset + 4u + num_args * sizeof(dex::TypeItem);
       } else {
         entry.second.data_offset = 0u;
       }
@@ -136,7 +137,7 @@
     header->proto_ids_size_ = protos_.size();
     header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
 
-    uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+    uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(dex::ProtoId);
     uint32_t field_idx = 0u;
     for (auto& entry : fields_) {
       entry.second = field_idx;
@@ -145,7 +146,7 @@
     header->field_ids_size_ = fields_.size();
     header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
 
-    uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+    uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(dex::FieldId);
     uint32_t method_idx = 0u;
     for (auto& entry : methods_) {
       entry.second = method_idx;
@@ -158,7 +159,7 @@
     header->class_defs_size_ = 0u;
     header->class_defs_off_ = 0u;
 
-    uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+    uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(dex::MethodId);
     header->data_size_ = data_section_size;
     header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
 
@@ -171,11 +172,11 @@
       uint32_t raw_offset = data_section_offset + entry.second.data_offset;
       dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
       std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
-      Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+      Write32(string_ids_offset + entry.second.idx * sizeof(dex::StringId), raw_offset);
     }
 
     for (const auto& entry : types_) {
-      Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+      Write32(type_ids_offset + entry.second * sizeof(dex::TypeId), GetStringIdx(entry.first));
       ++type_idx;
     }
 
@@ -183,7 +184,7 @@
       size_t num_args = entry.first.args.size();
       uint32_t type_list_offset =
           (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
-      uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+      uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(dex::ProtoId);
       Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
       Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
       Write32(raw_offset + 8u, type_list_offset);
@@ -191,21 +192,21 @@
         CHECK_NE(entry.second.data_offset, 0u);
         Write32(type_list_offset, num_args);
         for (size_t i = 0; i != num_args; ++i) {
-          Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+          Write16(type_list_offset + 4u + i * sizeof(dex::TypeItem),
                   GetTypeIdx(entry.first.args[i]));
         }
       }
     }
 
     for (const auto& entry : fields_) {
-      uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+      uint32_t raw_offset = field_ids_offset + entry.second * sizeof(dex::FieldId);
       Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
       Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
       Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
     }
 
     for (const auto& entry : methods_) {
-      uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+      uint32_t raw_offset = method_ids_offset + entry.second * sizeof(dex::MethodId);
       Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
       auto it = protos_.find(*entry.first.proto);
       CHECK(it != protos_.end());
diff --git a/libdexfile/dex/type_lookup_table.cc b/libdexfile/dex/type_lookup_table.cc
index 00ec358..c46b488 100644
--- a/libdexfile/dex/type_lookup_table.cc
+++ b/libdexfile/dex/type_lookup_table.cc
@@ -47,9 +47,9 @@
   // occupied then delay the insertion of the element to the second stage to reduce probing
   // distance.
   for (size_t class_def_idx = 0; class_def_idx < dex_file.NumClassDefs(); ++class_def_idx) {
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
-    const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
-    const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+    const dex::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const dex::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
     const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
     const uint32_t pos = hash & mask;
     if (entries[pos].IsEmpty()) {
@@ -62,9 +62,9 @@
   // The second stage. The initial position of these elements had a collision. Put these elements
   // into the nearest free cells and link them together by updating next_pos_delta.
   for (uint16_t class_def_idx : conflict_class_defs) {
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
-    const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
-    const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+    const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+    const dex::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const dex::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
     const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
     // Find the last entry in the chain.
     uint32_t tail_pos = hash & mask;
@@ -94,7 +94,7 @@
   DCHECK_ALIGNED(raw_data, alignof(Entry));
   const Entry* entries = reinterpret_cast<const Entry*>(raw_data);
   size_t mask_bits = CalculateMaskBits(num_class_defs);
-  return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries */ nullptr);
+  return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries= */ nullptr);
 }
 
 uint32_t TypeLookupTable::Lookup(const char* str, uint32_t hash) const {
diff --git a/libdexfile/dex/type_lookup_table.h b/libdexfile/dex/type_lookup_table.h
index 7005d34..5f002d1 100644
--- a/libdexfile/dex/type_lookup_table.h
+++ b/libdexfile/dex/type_lookup_table.h
@@ -17,7 +17,8 @@
 #ifndef ART_LIBDEXFILE_DEX_TYPE_LOOKUP_TABLE_H_
 #define ART_LIBDEXFILE_DEX_TYPE_LOOKUP_TABLE_H_
 
-#include "base/logging.h"
+#include <android-base/logging.h>
+
 #include "dex/dex_file_types.h"
 
 namespace art {
diff --git a/libdexfile/dex/type_reference.h b/libdexfile/dex/type_reference.h
index 9e7b880..3207e32 100644
--- a/libdexfile/dex/type_reference.h
+++ b/libdexfile/dex/type_reference.h
@@ -31,8 +31,8 @@
 // A type is located by its DexFile and the string_ids_ table index into that DexFile.
 class TypeReference : public DexFileReference {
  public:
-  TypeReference(const DexFile* file, dex::TypeIndex index)
-      : DexFileReference(file, index.index_) {}
+  TypeReference(const DexFile* dex_file, dex::TypeIndex index)
+      : DexFileReference(dex_file, index.index_) {}
 
   dex::TypeIndex TypeIndex() const {
     return dex::TypeIndex(index);
diff --git a/libdexfile/dex/utf.cc b/libdexfile/dex/utf.cc
index d09da73..ed07568 100644
--- a/libdexfile/dex/utf.cc
+++ b/libdexfile/dex/utf.cc
@@ -194,9 +194,10 @@
 uint32_t ComputeModifiedUtf8Hash(const char* chars) {
   uint32_t hash = 0;
   while (*chars != '\0') {
-    hash = hash * 31 + *chars++;
+    hash = hash * 31 + static_cast<uint8_t>(*chars);
+    ++chars;
   }
-  return static_cast<int32_t>(hash);
+  return hash;
 }
 
 int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8, const uint16_t* utf16,
diff --git a/libdexfile/dex/utf_test.cc b/libdexfile/dex/utf_test.cc
index d2f22d1..c7a6a34 100644
--- a/libdexfile/dex/utf_test.cc
+++ b/libdexfile/dex/utf_test.cc
@@ -378,4 +378,11 @@
   }
 }
 
+TEST_F(UtfTest, NonAscii) {
+  const char kNonAsciiCharacter = '\x80';
+  const char input[] = { kNonAsciiCharacter, '\0' };
+  uint32_t hash = ComputeModifiedUtf8Hash(input);
+  EXPECT_EQ(static_cast<uint8_t>(kNonAsciiCharacter), hash);
+}
+
 }  // namespace art
diff --git a/libdexfile/external/dex_file_ext.cc b/libdexfile/external/dex_file_ext.cc
new file mode 100644
index 0000000..e1b7874
--- /dev/null
+++ b/libdexfile/external/dex_file_ext.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_api/dex_file_external.h"
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <cerrno>
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/mapped_file.h>
+#include <android-base/stringprintf.h>
+
+#include <dex/class_accessor-inl.h>
+#include <dex/code_item_accessors-inl.h>
+#include <dex/dex_file-inl.h>
+#include <dex/dex_file_loader.h>
+
+namespace art {
+namespace {
+
+struct MethodCacheEntry {
+  int32_t offset;  // Offset relative to the start of the dex file header.
+  int32_t len;
+  int32_t index;  // Method index.
+};
+
+class MappedFileContainer : public DexFileContainer {
+ public:
+  explicit MappedFileContainer(std::unique_ptr<android::base::MappedFile>&& map)
+      : map_(std::move(map)) {}
+  ~MappedFileContainer() override {}
+  int GetPermissions() override { return 0; }
+  bool IsReadOnly() override { return true; }
+  bool EnableWrite() override { return false; }
+  bool DisableWrite() override { return false; }
+
+ private:
+  std::unique_ptr<android::base::MappedFile> map_;
+  DISALLOW_COPY_AND_ASSIGN(MappedFileContainer);
+};
+
+}  // namespace
+}  // namespace art
+
+extern "C" {
+
+struct ExtDexFileString {
+  const std::string str_;
+};
+
+static const ExtDexFileString empty_string{""};
+
+const ExtDexFileString* ExtDexFileMakeString(const char* str, size_t size) {
+  if (size == 0) {
+    return &empty_string;
+  }
+  return new ExtDexFileString{std::string(str, size)};
+}
+
+const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size) {
+  DCHECK(ext_string != nullptr);
+  *size = ext_string->str_.size();
+  return ext_string->str_.data();
+}
+
+void ExtDexFileFreeString(const ExtDexFileString* ext_string) {
+  DCHECK(ext_string != nullptr);
+  if (ext_string != &empty_string) {
+    delete (ext_string);
+  }
+}
+
+// Wraps DexFile to add the caching needed by the external interface. This is
+// what gets passed over as ExtDexFile*.
+struct ExtDexFile {
+ private:
+  // Method cache for GetMethodInfoForOffset. This is populated as we iterate
+  // sequentially through the class defs. MethodCacheEntry.name is only set for
+  // methods returned by GetMethodInfoForOffset.
+  std::map<int32_t, art::MethodCacheEntry> method_cache_;
+
+  // Index of first class def for which method_cache_ isn't complete.
+  uint32_t class_def_index_ = 0;
+
+ public:
+  std::unique_ptr<const art::DexFile> dex_file_;
+  explicit ExtDexFile(std::unique_ptr<const art::DexFile>&& dex_file)
+      : dex_file_(std::move(dex_file)) {}
+
+  art::MethodCacheEntry* GetMethodCacheEntryForOffset(int64_t dex_offset) {
+    // First look in the method cache.
+    auto it = method_cache_.upper_bound(dex_offset);
+    if (it != method_cache_.end() && dex_offset >= it->second.offset) {
+      return &it->second;
+    }
+
+    for (; class_def_index_ < dex_file_->NumClassDefs(); class_def_index_++) {
+      art::ClassAccessor accessor(*dex_file_, class_def_index_);
+
+      for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+        art::CodeItemInstructionAccessor code = method.GetInstructions();
+        if (!code.HasCodeItem()) {
+          continue;
+        }
+
+        int32_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file_->Begin();
+        int32_t len = code.InsnsSizeInBytes();
+        int32_t index = method.GetIndex();
+        auto res = method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index});
+        if (offset <= dex_offset && dex_offset < offset + len) {
+          return &res.first->second;
+        }
+      }
+    }
+
+    return nullptr;
+  }
+};
+
+int ExtDexFileOpenFromMemory(const void* addr,
+                             /*inout*/ size_t* size,
+                             const char* location,
+                             /*out*/ const ExtDexFileString** ext_error_msg,
+                             /*out*/ ExtDexFile** ext_dex_file) {
+  if (*size < sizeof(art::DexFile::Header)) {
+    *size = sizeof(art::DexFile::Header);
+    *ext_error_msg = nullptr;
+    return false;
+  }
+
+  const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(addr);
+  uint32_t file_size = header->file_size_;
+  if (art::CompactDexFile::IsMagicValid(header->magic_)) {
+    // Compact dex files store the data section separately so that it can be shared.
+    // Therefore we need to extend the read memory range to include it.
+    // TODO: This might be wasteful as we might read data in between as well.
+    //       In practice, this should be fine, as such sharing only happens on disk.
+    uint32_t computed_file_size;
+    if (__builtin_add_overflow(header->data_off_, header->data_size_, &computed_file_size)) {
+      *ext_error_msg = new ExtDexFileString{
+          android::base::StringPrintf("Corrupt CompactDexFile header in '%s'", location)};
+      return false;
+    }
+    if (computed_file_size > file_size) {
+      file_size = computed_file_size;
+    }
+  } else if (!art::StandardDexFile::IsMagicValid(header->magic_)) {
+    *ext_error_msg = new ExtDexFileString{
+        android::base::StringPrintf("Unrecognized dex file header in '%s'", location)};
+    return false;
+  }
+
+  if (*size < file_size) {
+    *size = file_size;
+    *ext_error_msg = nullptr;
+    return false;
+  }
+
+  std::string loc_str(location);
+  art::DexFileLoader loader;
+  std::string error_msg;
+  std::unique_ptr<const art::DexFile> dex_file = loader.Open(static_cast<const uint8_t*>(addr),
+                                                             *size,
+                                                             loc_str,
+                                                             header->checksum_,
+                                                             /*oat_dex_file=*/nullptr,
+                                                             /*verify=*/false,
+                                                             /*verify_checksum=*/false,
+                                                             &error_msg);
+  if (dex_file == nullptr) {
+    *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+    return false;
+  }
+
+  *ext_dex_file = new ExtDexFile(std::move(dex_file));
+  return true;
+}
+
+int ExtDexFileOpenFromFd(int fd,
+                         off_t offset,
+                         const char* location,
+                         /*out*/ const ExtDexFileString** ext_error_msg,
+                         /*out*/ ExtDexFile** ext_dex_file) {
+  size_t length;
+  {
+    struct stat sbuf;
+    std::memset(&sbuf, 0, sizeof(sbuf));
+    if (fstat(fd, &sbuf) == -1) {
+      *ext_error_msg = new ExtDexFileString{
+          android::base::StringPrintf("fstat '%s' failed: %s", location, std::strerror(errno))};
+      return false;
+    }
+    if (S_ISDIR(sbuf.st_mode)) {
+      *ext_error_msg = new ExtDexFileString{
+          android::base::StringPrintf("Attempt to mmap directory '%s'", location)};
+      return false;
+    }
+    length = sbuf.st_size;
+  }
+
+  if (length < offset + sizeof(art::DexFile::Header)) {
+    *ext_error_msg = new ExtDexFileString{android::base::StringPrintf(
+        "Offset %" PRId64 " too large for '%s' of size %zu",
+        int64_t{offset},
+        location,
+        length)};
+    return false;
+  }
+
+  // Cannot use MemMap in libartbase here, because it pulls in dlopen which we
+  // can't have when being compiled statically.
+  std::unique_ptr<android::base::MappedFile> map =
+      android::base::MappedFile::FromFd(fd, offset, length, PROT_READ);
+  if (map == nullptr) {
+    *ext_error_msg = new ExtDexFileString{
+        android::base::StringPrintf("mmap '%s' failed: %s", location, std::strerror(errno))};
+    return false;
+  }
+
+  const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(map->data());
+  uint32_t file_size;
+  if (__builtin_add_overflow(offset, header->file_size_, &file_size)) {
+    *ext_error_msg =
+        new ExtDexFileString{android::base::StringPrintf("Corrupt header in '%s'", location)};
+    return false;
+  }
+  if (length < file_size) {
+    *ext_error_msg = new ExtDexFileString{
+        android::base::StringPrintf("Dex file '%s' too short: expected %" PRIu32 ", got %" PRIu64,
+                                    location,
+                                    file_size,
+                                    uint64_t{length})};
+    return false;
+  }
+
+  void* addr = map->data();
+  size_t size = map->size();
+  auto container = std::make_unique<art::MappedFileContainer>(std::move(map));
+
+  std::string loc_str(location);
+  std::string error_msg;
+  art::DexFileLoader loader;
+  std::unique_ptr<const art::DexFile> dex_file = loader.Open(reinterpret_cast<const uint8_t*>(addr),
+                                                             size,
+                                                             loc_str,
+                                                             header->checksum_,
+                                                             /*oat_dex_file=*/nullptr,
+                                                             /*verify=*/false,
+                                                             /*verify_checksum=*/false,
+                                                             &error_msg,
+                                                             std::move(container));
+  if (dex_file == nullptr) {
+    *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+    return false;
+  }
+  *ext_dex_file = new ExtDexFile(std::move(dex_file));
+  return true;
+}
+
+int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file,
+                                     int64_t dex_offset,
+                                     int with_signature,
+                                     /*out*/ ExtDexFileMethodInfo* method_info) {
+  if (!ext_dex_file->dex_file_->IsInDataSection(ext_dex_file->dex_file_->Begin() + dex_offset)) {
+    return false;  // The DEX offset is not within the bytecode of this dex file.
+  }
+
+  if (ext_dex_file->dex_file_->IsCompactDexFile()) {
+    // The data section of compact dex files might be shared.
+    // Check the subrange unique to this compact dex.
+    const art::CompactDexFile::Header& cdex_header =
+        ext_dex_file->dex_file_->AsCompactDexFile()->GetHeader();
+    uint32_t begin = cdex_header.data_off_ + cdex_header.OwnedDataBegin();
+    uint32_t end = cdex_header.data_off_ + cdex_header.OwnedDataEnd();
+    if (dex_offset < begin || dex_offset >= end) {
+      return false;  // The DEX offset is not within the bytecode of this dex file.
+    }
+  }
+
+  art::MethodCacheEntry* entry = ext_dex_file->GetMethodCacheEntryForOffset(dex_offset);
+  if (entry != nullptr) {
+    method_info->offset = entry->offset;
+    method_info->len = entry->len;
+    method_info->name =
+        new ExtDexFileString{ext_dex_file->dex_file_->PrettyMethod(entry->index, with_signature)};
+    return true;
+  }
+
+  return false;
+}
+
+void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file,
+                                 int with_signature,
+                                 ExtDexFileMethodInfoCallback* method_info_cb,
+                                 void* user_data) {
+  for (art::ClassAccessor accessor : ext_dex_file->dex_file_->GetClasses()) {
+    for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+      art::CodeItemInstructionAccessor code = method.GetInstructions();
+      if (!code.HasCodeItem()) {
+        continue;
+      }
+
+      ExtDexFileMethodInfo method_info;
+      method_info.offset = static_cast<int32_t>(reinterpret_cast<const uint8_t*>(code.Insns()) -
+                                                ext_dex_file->dex_file_->Begin());
+      method_info.len = code.InsnsSizeInBytes();
+      method_info.name = new ExtDexFileString{
+          ext_dex_file->dex_file_->PrettyMethod(method.GetIndex(), with_signature)};
+      method_info_cb(&method_info, user_data);
+    }
+  }
+}
+
+void ExtDexFileFree(ExtDexFile* ext_dex_file) { delete (ext_dex_file); }
+
+}  // extern "C"
diff --git a/libdexfile/external/dex_file_ext_c_test.c b/libdexfile/external/dex_file_ext_c_test.c
new file mode 100644
index 0000000..c448a16
--- /dev/null
+++ b/libdexfile/external/dex_file_ext_c_test.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+/* The main purpose of this test is to ensure this C header compiles in C, so
+ * that no C++ features inadvertently leak into the C ABI. */
+#include "art_api/dex_file_external.h"
+
+static const char gtest_output_arg[] = "--gtest_output=xml:";
+static const char gtest_output_xml[] = "\
+<?xml version=\"1.0\"?>\n\
+<testsuites tests=\"0\" failures=\"0\" disabled=\"0\" errors=\"0\" name=\"AllTests\">";
+
+/* Writes a dummy gtest xml report to the given path. */
+static int write_gtest_output_xml(char* gtest_output_path) {
+  FILE* output_fd = fopen(gtest_output_path, "w");
+  if (output_fd == NULL) {
+    fprintf(stderr, "Failed to open %s: %s\n", gtest_output_path, strerror(errno));
+    return 1;
+  }
+  if (fprintf(output_fd, gtest_output_xml) != sizeof(gtest_output_xml) - 1) {
+    fprintf(stderr, "Failed to write %s: %s\n", gtest_output_path, strerror(errno));
+    fclose(output_fd);
+    return 1;
+  }
+  if (fclose(output_fd) != 0) {
+    fprintf(stderr, "Failed to close %s: %s\n", gtest_output_path, strerror(errno));
+    return 1;
+  }
+  return 0;
+}
+
+int main(int argc, char** argv) {
+  if (argc >= 2 && strncmp(argv[1], gtest_output_arg, sizeof(gtest_output_arg) - 1) == 0) {
+    /* The ART gtest framework expects all tests to understand --gtest_output. */
+    return write_gtest_output_xml(argv[1] + sizeof(gtest_output_arg) - 1);
+  }
+  return 0;
+}
diff --git a/libdexfile/external/dex_file_supp.cc b/libdexfile/external/dex_file_supp.cc
new file mode 100644
index 0000000..5bd25fc
--- /dev/null
+++ b/libdexfile/external/dex_file_supp.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_api/dex_file_support.h"
+
+namespace art_api {
+namespace dex {
+
+DexFile::~DexFile() { ExtDexFileFree(ext_dex_file_); }
+
+MethodInfo DexFile::AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info) {
+  return {ext_method_info.offset, ext_method_info.len, DexString(ext_method_info.name)};
+}
+
+void DexFile::AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* ctx) {
+  auto vect = static_cast<MethodInfoVector*>(ctx);
+  vect->emplace_back(AbsorbMethodInfo(*ext_method_info));
+}
+
+}  // namespace dex
+}  // namespace art_api
diff --git a/libdexfile/external/dex_file_supp_test.cc b/libdexfile/external/dex_file_supp_test.cc
new file mode 100644
index 0000000..2f7ad50
--- /dev/null
+++ b/libdexfile/external/dex_file_supp_test.cc
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/types.h>
+
+#include <memory>
+#include <string>
+#include <string_view>
+
+#include <android-base/file.h>
+#include <dex/dex_file.h>
+#include <gtest/gtest.h>
+
+#include "art_api/dex_file_support.h"
+
+namespace art_api {
+namespace dex {
+
+static constexpr uint32_t kDexData[] = {
+    0x0a786564, 0x00383330, 0xc98b3ab8, 0xf3749d94, 0xaecca4d8, 0xffc7b09a, 0xdca9ca7f, 0x5be5deab,
+    0x00000220, 0x00000070, 0x12345678, 0x00000000, 0x00000000, 0x0000018c, 0x00000008, 0x00000070,
+    0x00000004, 0x00000090, 0x00000002, 0x000000a0, 0x00000000, 0x00000000, 0x00000003, 0x000000b8,
+    0x00000001, 0x000000d0, 0x00000130, 0x000000f0, 0x00000122, 0x0000012a, 0x00000132, 0x00000146,
+    0x00000151, 0x00000154, 0x00000158, 0x0000016d, 0x00000001, 0x00000002, 0x00000004, 0x00000006,
+    0x00000004, 0x00000002, 0x00000000, 0x00000005, 0x00000002, 0x0000011c, 0x00000000, 0x00000000,
+    0x00010000, 0x00000007, 0x00000001, 0x00000000, 0x00000000, 0x00000001, 0x00000001, 0x00000000,
+    0x00000003, 0x00000000, 0x0000017e, 0x00000000, 0x00010001, 0x00000001, 0x00000173, 0x00000004,
+    0x00021070, 0x000e0000, 0x00010001, 0x00000000, 0x00000178, 0x00000001, 0x0000000e, 0x00000001,
+    0x3c060003, 0x74696e69, 0x4c06003e, 0x6e69614d, 0x4c12003b, 0x6176616a, 0x6e616c2f, 0x624f2f67,
+    0x7463656a, 0x4d09003b, 0x2e6e6961, 0x6176616a, 0x00560100, 0x004c5602, 0x6a4c5b13, 0x2f617661,
+    0x676e616c, 0x7274532f, 0x3b676e69, 0x616d0400, 0x01006e69, 0x000e0700, 0x07000103, 0x0000000e,
+    0x81000002, 0x01f00480, 0x02880901, 0x0000000c, 0x00000000, 0x00000001, 0x00000000, 0x00000001,
+    0x00000008, 0x00000070, 0x00000002, 0x00000004, 0x00000090, 0x00000003, 0x00000002, 0x000000a0,
+    0x00000005, 0x00000003, 0x000000b8, 0x00000006, 0x00000001, 0x000000d0, 0x00002001, 0x00000002,
+    0x000000f0, 0x00001001, 0x00000001, 0x0000011c, 0x00002002, 0x00000008, 0x00000122, 0x00002003,
+    0x00000002, 0x00000173, 0x00002000, 0x00000001, 0x0000017e, 0x00001000, 0x00000001, 0x0000018c,
+};
+
+TEST(DexStringTest, alloc_string) {
+  auto s = DexString("123");
+  EXPECT_EQ(std::string_view(s), "123");
+}
+
+TEST(DexStringTest, alloc_empty_string) {
+  auto s = DexString("");
+  EXPECT_TRUE(std::string_view(s).empty());
+}
+
+TEST(DexStringTest, move_construct) {
+  auto s1 = DexString("foo");
+  auto s2 = DexString(std::move(s1));
+  EXPECT_TRUE(std::string_view(s1).empty());
+  EXPECT_EQ(std::string_view(s2), "foo");
+}
+
+TEST(DexStringTest, move_assign) {
+  auto s1 = DexString("foo");
+  DexString s2;
+  EXPECT_TRUE(std::string_view(s2).empty());
+  s2 = std::move(s1);
+  EXPECT_TRUE(std::string_view(s1).empty());
+  EXPECT_EQ(std::string_view(s2), "foo");
+}
+
+TEST(DexStringTest, reassign) {
+  auto s = DexString("foo");
+  s = DexString("bar");
+  EXPECT_EQ(std::string_view(s), "bar");
+}
+
+TEST(DexStringTest, data_access) {
+  auto s = DexString("foo");
+  EXPECT_STREQ(s.data(), "foo");
+  EXPECT_STREQ(s.c_str(), "foo");
+}
+
+TEST(DexStringTest, size_access) {
+  auto s = DexString("foo");
+  EXPECT_EQ(s.size(), size_t{3});
+  EXPECT_EQ(s.length(), size_t{3});
+}
+
+TEST(DexStringTest, equality) {
+  auto s = DexString("foo");
+  EXPECT_EQ(s, DexString("foo"));
+  EXPECT_FALSE(s == DexString("bar"));
+}
+
+TEST(DexStringTest, equality_with_nul) {
+  auto s = DexString(std::string("foo\0bar", 7));
+  EXPECT_EQ(s.size(), size_t{7});
+  EXPECT_EQ(s, DexString(std::string("foo\0bar", 7)));
+  EXPECT_FALSE(s == DexString(std::string("foo\0baz", 7)));
+}
+
+TEST(DexFileTest, from_memory_header_too_small) {
+  size_t size = sizeof(art::DexFile::Header) - 1;
+  std::string error_msg;
+  EXPECT_EQ(DexFile::OpenFromMemory(kDexData, &size, "", &error_msg), nullptr);
+  EXPECT_EQ(size, sizeof(art::DexFile::Header));
+  EXPECT_TRUE(error_msg.empty());
+}
+
+TEST(DexFileTest, from_memory_file_too_small) {
+  size_t size = sizeof(art::DexFile::Header);
+  std::string error_msg;
+  EXPECT_EQ(DexFile::OpenFromMemory(kDexData, &size, "", &error_msg), nullptr);
+  EXPECT_EQ(size, sizeof(kDexData));
+  EXPECT_TRUE(error_msg.empty());
+}
+
+static std::unique_ptr<DexFile> GetTestDexData() {
+  size_t size = sizeof(kDexData);
+  std::string error_msg;
+  std::unique_ptr<DexFile> dex_file = DexFile::OpenFromMemory(kDexData, &size, "", &error_msg);
+  EXPECT_TRUE(error_msg.empty());
+  return dex_file;
+}
+
+TEST(DexFileTest, from_memory) {
+  EXPECT_NE(GetTestDexData(), nullptr);
+}
+
+TEST(DexFileTest, from_fd_header_too_small) {
+  TemporaryFile tf;
+  ASSERT_NE(tf.fd, -1);
+  ASSERT_EQ(sizeof(art::DexFile::Header) - 1,
+            static_cast<size_t>(
+                TEMP_FAILURE_RETRY(write(tf.fd, kDexData, sizeof(art::DexFile::Header) - 1))));
+
+  std::string error_msg;
+  EXPECT_EQ(DexFile::OpenFromFd(tf.fd, 0, tf.path, &error_msg), nullptr);
+  EXPECT_FALSE(error_msg.empty());
+}
+
+TEST(DexFileTest, from_fd_file_too_small) {
+  TemporaryFile tf;
+  ASSERT_NE(tf.fd, -1);
+  ASSERT_EQ(sizeof(art::DexFile::Header),
+            static_cast<size_t>(
+                TEMP_FAILURE_RETRY(write(tf.fd, kDexData, sizeof(art::DexFile::Header)))));
+
+  std::string error_msg;
+  EXPECT_EQ(DexFile::OpenFromFd(tf.fd, 0, tf.path, &error_msg), nullptr);
+  EXPECT_FALSE(error_msg.empty());
+}
+
+TEST(DexFileTest, from_fd) {
+  TemporaryFile tf;
+  ASSERT_NE(tf.fd, -1);
+  ASSERT_EQ(sizeof(kDexData),
+            static_cast<size_t>(TEMP_FAILURE_RETRY(write(tf.fd, kDexData, sizeof(kDexData)))));
+
+  std::string error_msg;
+  EXPECT_NE(DexFile::OpenFromFd(tf.fd, 0, tf.path, &error_msg), nullptr);
+  EXPECT_TRUE(error_msg.empty());
+}
+
+TEST(DexFileTest, from_fd_non_zero_offset) {
+  TemporaryFile tf;
+  ASSERT_NE(tf.fd, -1);
+  ASSERT_EQ(0x100, lseek(tf.fd, 0x100, SEEK_SET));
+  ASSERT_EQ(sizeof(kDexData),
+            static_cast<size_t>(TEMP_FAILURE_RETRY(write(tf.fd, kDexData, sizeof(kDexData)))));
+
+  std::string error_msg;
+  EXPECT_NE(DexFile::OpenFromFd(tf.fd, 0x100, tf.path, &error_msg), nullptr);
+  EXPECT_TRUE(error_msg.empty());
+}
+
+TEST(DexFileTest, get_method_info_for_offset_without_signature) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  MethodInfo info = dex_file->GetMethodInfoForOffset(0x102, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  EXPECT_EQ(info.len, int32_t{8});
+  EXPECT_STREQ(info.name.data(), "Main.<init>");
+
+  info = dex_file->GetMethodInfoForOffset(0x118, false);
+  EXPECT_EQ(info.offset, int32_t{0x118});
+  EXPECT_EQ(info.len, int32_t{2});
+  EXPECT_STREQ(info.name.data(), "Main.main");
+
+  // Retrieve a cached result.
+  info = dex_file->GetMethodInfoForOffset(0x104, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  EXPECT_EQ(info.len, int32_t{8});
+  EXPECT_STREQ(info.name.data(), "Main.<init>");
+}
+
+TEST(DexFileTest, get_method_info_for_offset_with_signature) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  MethodInfo info = dex_file->GetMethodInfoForOffset(0x102, true);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  EXPECT_EQ(info.len, int32_t{8});
+  EXPECT_STREQ(info.name.data(), "void Main.<init>()");
+
+  info = dex_file->GetMethodInfoForOffset(0x118, true);
+  EXPECT_EQ(info.offset, int32_t{0x118});
+  EXPECT_EQ(info.len, int32_t{2});
+  EXPECT_STREQ(info.name.data(), "void Main.main(java.lang.String[])");
+
+  // Retrieve a cached result.
+  info = dex_file->GetMethodInfoForOffset(0x104, true);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  EXPECT_EQ(info.len, int32_t{8});
+  EXPECT_STREQ(info.name.data(), "void Main.<init>()");
+
+  // with_signature doesn't affect the cache.
+  info = dex_file->GetMethodInfoForOffset(0x104, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  EXPECT_EQ(info.len, int32_t{8});
+  EXPECT_STREQ(info.name.data(), "Main.<init>");
+}
+
+TEST(DexFileTest, get_method_info_for_offset_boundaries) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  MethodInfo info = dex_file->GetMethodInfoForOffset(0x100000, false);
+  EXPECT_EQ(info.offset, int32_t{0});
+
+  info = dex_file->GetMethodInfoForOffset(0x99, false);
+  EXPECT_EQ(info.offset, int32_t{0});
+  info = dex_file->GetMethodInfoForOffset(0x100, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  info = dex_file->GetMethodInfoForOffset(0x107, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+  info = dex_file->GetMethodInfoForOffset(0x108, false);
+  EXPECT_EQ(info.offset, int32_t{0});
+
+  // Make sure that once the whole dex file has been cached, no problems occur.
+  info = dex_file->GetMethodInfoForOffset(0x98, false);
+  EXPECT_EQ(info.offset, int32_t{0});
+
+  // Choose a value that is in the cached map, but not in a valid method.
+  info = dex_file->GetMethodInfoForOffset(0x110, false);
+  EXPECT_EQ(info.offset, int32_t{0});
+}
+
+TEST(DexFileTest, get_all_method_infos_without_signature) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  std::vector<MethodInfo> infos;
+  infos.emplace_back(MethodInfo{0x100, 8, DexString("Main.<init>")});
+  infos.emplace_back(MethodInfo{0x118, 2, DexString("Main.main")});
+  ASSERT_EQ(dex_file->GetAllMethodInfos(false), infos);
+}
+
+TEST(DexFileTest, get_all_method_infos_with_signature) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  std::vector<MethodInfo> infos;
+  infos.emplace_back(MethodInfo{0x100, 8, DexString("void Main.<init>()")});
+  infos.emplace_back(MethodInfo{0x118, 2, DexString("void Main.main(java.lang.String[])")});
+  ASSERT_EQ(dex_file->GetAllMethodInfos(true), infos);
+}
+
+TEST(DexFileTest, move_construct) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  auto df1 = DexFile(std::move(*dex_file));
+  auto df2 = DexFile(std::move(df1));
+
+  MethodInfo info = df2.GetMethodInfoForOffset(0x100, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+}
+
+}  // namespace dex
+}  // namespace art_api
diff --git a/libdexfile/external/include/art_api/dex_file_external.h b/libdexfile/external/include/art_api/dex_file_external.h
new file mode 100644
index 0000000..b29e759
--- /dev/null
+++ b/libdexfile/external/include/art_api/dex_file_external.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_EXTERNAL_H_
+#define ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_EXTERNAL_H_
+
+// Dex file external API
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This is the stable C ABI that backs art_api::dex below. Structs and functions
+// may only be added here. C++ users should use dex_file_support.h instead.
+
+// Opaque wrapper for an std::string allocated in libdexfile which must be freed
+// using ExtDexFileFreeString.
+struct ExtDexFileString;
+
+// Returns an ExtDexFileString initialized to the given string.
+const struct ExtDexFileString* ExtDexFileMakeString(const char* str, size_t size);
+
+// Returns a pointer to the underlying null-terminated character array and its
+// size for the given ExtDexFileString.
+const char* ExtDexFileGetString(const struct ExtDexFileString* ext_string, /*out*/ size_t* size);
+
+// Frees an ExtDexFileString.
+void ExtDexFileFreeString(const struct ExtDexFileString* ext_string);
+
+struct ExtDexFileMethodInfo {
+  int32_t offset;
+  int32_t len;
+  const struct ExtDexFileString* name;
+};
+
+struct ExtDexFile;
+
+// See art_api::dex::DexFile::OpenFromMemory. Returns true on success.
+int ExtDexFileOpenFromMemory(const void* addr,
+                             /*inout*/ size_t* size,
+                             const char* location,
+                             /*out*/ const struct ExtDexFileString** error_msg,
+                             /*out*/ struct ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::OpenFromFd. Returns true on success.
+int ExtDexFileOpenFromFd(int fd,
+                         off_t offset,
+                         const char* location,
+                         /*out*/ const struct ExtDexFileString** error_msg,
+                         /*out*/ struct ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::GetMethodInfoForOffset. Returns true on success.
+int ExtDexFileGetMethodInfoForOffset(struct ExtDexFile* ext_dex_file,
+                                     int64_t dex_offset,
+                                     int with_signature,
+                                     /*out*/ struct ExtDexFileMethodInfo* method_info);
+
+typedef void ExtDexFileMethodInfoCallback(const struct ExtDexFileMethodInfo* ext_method_info,
+                                          void* user_data);
+
+// See art_api::dex::DexFile::GetAllMethodInfos.
+void ExtDexFileGetAllMethodInfos(struct ExtDexFile* ext_dex_file,
+                                 int with_signature,
+                                 ExtDexFileMethodInfoCallback* method_info_cb,
+                                 void* user_data);
+
+// Frees an ExtDexFile.
+void ExtDexFileFree(struct ExtDexFile* ext_dex_file);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_EXTERNAL_H_
diff --git a/libdexfile/external/include/art_api/dex_file_support.h b/libdexfile/external/include/art_api/dex_file_support.h
new file mode 100644
index 0000000..24222af
--- /dev/null
+++ b/libdexfile/external/include/art_api/dex_file_support.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_SUPPORT_H_
+#define ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_SUPPORT_H_
+
+// C++ wrapper for the dex file external API.
+
+#include <cstring>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include <android-base/macros.h>
+
+#include "art_api/dex_file_external.h"
+
+namespace art_api {
+namespace dex {
+
+// Minimal std::string look-alike for a string returned from libdexfile.
+class DexString final {
+ public:
+  DexString(DexString&& dex_str) noexcept : ext_string_(dex_str.ext_string_) {
+    dex_str.ext_string_ = ExtDexFileMakeString("", 0);
+  }
+  explicit DexString(const char* str = "")
+      : ext_string_(ExtDexFileMakeString(str, std::strlen(str))) {}
+  explicit DexString(std::string_view str)
+      : ext_string_(ExtDexFileMakeString(str.data(), str.size())) {}
+  ~DexString() { ExtDexFileFreeString(ext_string_); }
+
+  DexString& operator=(DexString&& dex_str) noexcept {
+    std::swap(ext_string_, dex_str.ext_string_);
+    return *this;
+  }
+
+  const char* data() const {
+    size_t ignored;
+    return ExtDexFileGetString(ext_string_, &ignored);
+  }
+  const char* c_str() const { return data(); }
+
+  size_t size() const {
+    size_t len;
+    (void)ExtDexFileGetString(ext_string_, &len);
+    return len;
+  }
+  size_t length() const { return size(); }
+
+  operator std::string_view() const {
+    size_t len;
+    const char* chars = ExtDexFileGetString(ext_string_, &len);
+    return std::string_view(chars, len);
+  }
+
+ private:
+  friend class DexFile;
+  friend bool operator==(const DexString&, const DexString&);
+  explicit DexString(const ExtDexFileString* ext_string) : ext_string_(ext_string) {}
+  const ExtDexFileString* ext_string_;  // Owned instance. Never nullptr.
+
+  DISALLOW_COPY_AND_ASSIGN(DexString);
+};
+
+inline bool operator==(const DexString& s1, const DexString& s2) {
+  size_t l1, l2;
+  const char* str1 = ExtDexFileGetString(s1.ext_string_, &l1);
+  const char* str2 = ExtDexFileGetString(s2.ext_string_, &l2);
+  // Use memcmp to avoid assumption about absence of null characters in the strings.
+  return l1 == l2 && !std::memcmp(str1, str2, l1);
+}
+
+struct MethodInfo {
+  int32_t offset;  // Code offset relative to the start of the dex file header
+  int32_t len;  // Code length
+  DexString name;
+};
+
+inline bool operator==(const MethodInfo& s1, const MethodInfo& s2) {
+  return s1.offset == s2.offset && s1.len == s2.len && s1.name == s2.name;
+}
+
+// External stable API to access ordinary dex files and CompactDex. This wraps
+// the stable C ABI and handles instance ownership. Thread-compatible but not
+// thread-safe.
+class DexFile {
+ public:
+  DexFile(DexFile&& dex_file) noexcept {
+    ext_dex_file_ = dex_file.ext_dex_file_;
+    dex_file.ext_dex_file_ = nullptr;
+  }
+  virtual ~DexFile();
+
+  // Interprets a chunk of memory as a dex file. As long as *size is too small,
+  // returns nullptr, sets *size to a new size to try again with, and sets
+  // *error_msg to "". That might happen repeatedly. Also returns nullptr
+  // on error in which case *error_msg is set to a nonempty string.
+  //
+  // location is a string that describes the dex file, and is preferably its
+  // path. It is mostly used to make error messages better, and may be "".
+  //
+  // The caller must retain the memory.
+  static std::unique_ptr<DexFile> OpenFromMemory(const void* addr,
+                                                 size_t* size,
+                                                 const std::string& location,
+                                                 /*out*/ std::string* error_msg) {
+    ExtDexFile* ext_dex_file;
+    const ExtDexFileString* ext_error_msg = nullptr;
+    if (ExtDexFileOpenFromMemory(addr, size, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+      return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+    }
+    *error_msg = (ext_error_msg == nullptr) ? "" : std::string(DexString(ext_error_msg));
+    return nullptr;
+  }
+
+  // mmaps the given file offset in the open fd and reads a dexfile from there.
+  // Returns nullptr on error in which case *error_msg is set.
+  //
+  // location is a string that describes the dex file, and is preferably its
+  // path. It is mostly used to make error messages better, and may be "".
+  static std::unique_ptr<DexFile> OpenFromFd(int fd,
+                                             off_t offset,
+                                             const std::string& location,
+                                             /*out*/ std::string* error_msg) {
+    ExtDexFile* ext_dex_file;
+    const ExtDexFileString* ext_error_msg = nullptr;
+    if (ExtDexFileOpenFromFd(fd, offset, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+      return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+    }
+    *error_msg = std::string(DexString(ext_error_msg));
+    return nullptr;
+  }
+
+  // Given an offset relative to the start of the dex file header, if there is a
+  // method whose instruction range includes that offset then returns info about
+  // it, otherwise returns a struct with offset == 0. MethodInfo.name receives
+  // the full function signature if with_signature is set, otherwise it gets the
+  // class and method name only.
+  MethodInfo GetMethodInfoForOffset(int64_t dex_offset, bool with_signature) {
+    ExtDexFileMethodInfo ext_method_info;
+    if (ExtDexFileGetMethodInfoForOffset(ext_dex_file_,
+                                         dex_offset,
+                                         with_signature,
+                                         &ext_method_info)) {
+      return AbsorbMethodInfo(ext_method_info);
+    }
+    return {/*offset=*/0, /*len=*/0, /*name=*/DexString()};
+  }
+
+  // Returns info structs about all methods in the dex file. MethodInfo.name
+  // receives the full function signature if with_signature is set, otherwise it
+  // gets the class and method name only.
+  std::vector<MethodInfo> GetAllMethodInfos(bool with_signature) {
+    MethodInfoVector res;
+    ExtDexFileGetAllMethodInfos(ext_dex_file_,
+                                with_signature,
+                                AddMethodInfoCallback,
+                                static_cast<void*>(&res));
+    return res;
+  }
+
+ private:
+  explicit DexFile(ExtDexFile* ext_dex_file) : ext_dex_file_(ext_dex_file) {}
+  ExtDexFile* ext_dex_file_;  // Owned instance. nullptr only in moved-from zombies.
+
+  typedef std::vector<MethodInfo> MethodInfoVector;
+
+  static MethodInfo AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info);
+  static void AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* user_data);
+
+  DISALLOW_COPY_AND_ASSIGN(DexFile);
+};
+
+}  // namespace dex
+}  // namespace art_api
+
+#endif  // ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_DEX_FILE_SUPPORT_H_
diff --git a/libdexfile/external/libdexfile_external.map.txt b/libdexfile/external/libdexfile_external.map.txt
new file mode 100644
index 0000000..450b633
--- /dev/null
+++ b/libdexfile/external/libdexfile_external.map.txt
@@ -0,0 +1,13 @@
+LIBDEXFILE_EXTERNAL_1 {
+  global:
+    ExtDexFileFree;
+    ExtDexFileFreeString;
+    ExtDexFileGetAllMethodInfos;
+    ExtDexFileGetMethodInfoForOffset;
+    ExtDexFileGetString;
+    ExtDexFileMakeString;
+    ExtDexFileOpenFromFd;
+    ExtDexFileOpenFromMemory;
+  local:
+    *;
+};
diff --git a/libprofile/Android.bp b/libprofile/Android.bp
index b9883f6..986adce 100644
--- a/libprofile/Android.bp
+++ b/libprofile/Android.bp
@@ -23,37 +23,78 @@
     ],
     target: {
         android: {
+            shared_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
+                "libbase",
+            ],
             static_libs: [
                 // ZipArchive support, the order matters here to get all symbols.
                 "libziparchive",
                 "libz",
             ],
-	    shared_libs: [
-	        // For android::FileMap used by libziparchive.
-                "libutils",
-	    ],
+            export_shared_lib_headers: ["libbase"],
         },
-        host: {
+        not_windows: {
             shared_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
                 "libziparchive",
                 "libz",
+                "libbase",
             ],
+            export_shared_lib_headers: ["libbase"],
+        },
+        windows: {
+            cflags: ["-Wno-thread-safety"],
+            static_libs: [
+                "libartbase",
+                "libartpalette",
+                "libdexfile",
+                "libziparchive",
+                "libz",
+                "libbase",
+            ],
+            export_static_lib_headers: ["libbase"],
         },
     },
     //generated_sources: ["art_libartbase_operator_srcs"],
     cflags: ["-DBUILDING_LIBART=1"],
-    shared_libs: [
-        "libartbase",
-        "libdexfile",
-        "libartbase",
-	// For atrace.
-        "libcutils",
-    ],
     export_include_dirs: ["."],
     // ART's macros.h depends on libbase's macros.h.
     // Note: runtime_options.h depends on cmdline. But we don't really want to export this
     //       generically. dex2oat takes care of it itself.
-    export_shared_lib_headers: ["libbase"],
+}
+
+cc_defaults {
+    name: "libprofile_static_base_defaults",
+    static_libs: [
+        "libbase",
+        "libz",
+        "libziparchive",
+    ],
+}
+
+cc_defaults {
+    name: "libprofile_static_defaults",
+    defaults: [
+        "libprofile_static_base_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+    ],
+    static_libs: ["libprofile"],
+}
+
+cc_defaults {
+    name: "libprofiled_static_defaults",
+    defaults: [
+        "libprofile_static_base_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+    ],
+    static_libs: ["libprofiled"],
 }
 
 art_cc_library {
@@ -69,6 +110,14 @@
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    }
 }
 
 art_cc_library {
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 9b70e62..47b17ae 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -19,16 +19,18 @@
 #include <sys/file.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <sys/uio.h>
 #include <unistd.h>
 #include <zlib.h>
 
+#include <algorithm>
 #include <cerrno>
 #include <climits>
 #include <cstdlib>
+#include <iostream>
+#include <numeric>
+#include <random>
 #include <string>
 #include <vector>
-#include <iostream>
 
 #include "android-base/file.h"
 
@@ -55,6 +57,12 @@
 // profile_compilation_info object. All the profile line headers are now placed together
 // before corresponding method_encodings and class_ids.
 const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0' };
+const uint8_t ProfileCompilationInfo::kProfileVersionWithCounters[] = { '5', '0', '0', '\0' };
+
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersion) == 4,
+              "Invalid profile version size");
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersionWithCounters) == 4,
+              "Invalid profile version size");
 
 // The name of the profile entry in the dex metadata file.
 // DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
@@ -81,18 +89,31 @@
   return kDebugIgnoreChecksum || dex_file_checksum == checksum;
 }
 
+// For storage efficiency we store aggregation counts of up to at most 2^16.
+static uint16_t IncrementAggregationCounter(uint16_t counter, uint16_t value) {
+  if (counter < (std::numeric_limits<uint16_t>::max() - value)) {
+    return counter + value;
+  } else {
+    return std::numeric_limits<uint16_t>::max();
+  }
+}
+
 ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
     : default_arena_pool_(),
       allocator_(custom_arena_pool),
       info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+      aggregation_count_(0) {
+  InitProfileVersionInternal(kProfileVersion);
 }
 
 ProfileCompilationInfo::ProfileCompilationInfo()
     : default_arena_pool_(),
       allocator_(&default_arena_pool_),
       info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+      aggregation_count_(0) {
+  InitProfileVersionInternal(kProfileVersion);
 }
 
 ProfileCompilationInfo::~ProfileCompilationInfo() {
@@ -186,9 +207,13 @@
 
 bool ProfileCompilationInfo::MergeWith(const std::string& filename) {
   std::string error;
+#ifdef _WIN32
+  int flags = O_RDONLY;
+#else
   int flags = O_RDONLY | O_NOFOLLOW | O_CLOEXEC;
-  ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
-      /*block*/false, &error);
+#endif
+  ScopedFlock profile_file =
+      LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
 
   if (profile_file.get() == nullptr) {
     LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -214,12 +239,16 @@
     return kProfileLoadWouldOverwiteData;
   }
 
+#ifdef _WIN32
+  int flags = O_RDWR;
+#else
   int flags = O_RDWR | O_NOFOLLOW | O_CLOEXEC;
+#endif
   // There's no need to fsync profile data right away. We get many chances
   // to write it again in case something goes wrong. We can rely on a simple
   // close(), no sync, and let to the kernel decide when to write to disk.
-  ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
-                                              /*block*/false, &error);
+  ScopedFlock profile_file =
+      LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
 
   if (profile_file.get() == nullptr) {
     LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -252,12 +281,16 @@
 bool ProfileCompilationInfo::Save(const std::string& filename, uint64_t* bytes_written) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   std::string error;
+#ifdef _WIN32
+  int flags = O_WRONLY;
+#else
   int flags = O_WRONLY | O_NOFOLLOW | O_CLOEXEC;
+#endif
   // There's no need to fsync profile data right away. We get many chances
   // to write it again in case something goes wrong. We can rely on a simple
   // close(), no sync, and let to the kernel decide when to write to disk.
-  ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
-                                              /*block*/false, &error);
+  ScopedFlock profile_file =
+      LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
   if (profile_file.get() == nullptr) {
     LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
     return false;
@@ -323,13 +356,15 @@
 /**
  * Serialization format:
  * [profile_header, zipped[[profile_line_header1, profile_line_header2...],[profile_line_data1,
- *    profile_line_data2...]]]
+ *    profile_line_data2...]],global_aggregation_counter]
  * profile_header:
  *   magic,version,number_of_dex_files,uncompressed_size_of_zipped_data,compressed_data_size
  * profile_line_header:
  *   dex_location,number_of_classes,methods_region_size,dex_location_checksum,num_method_ids
  * profile_line_data:
- *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap
+ *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap,
+ *   num_classes,class_counters,num_methods,method_counters
+ * The aggregation counters are only stored if the profile version is kProfileVersionWithCounters.
  * The method_encoding is:
  *    method_id,number_of_inline_caches,inline_cache1,inline_cache2...
  * The inline_cache is:
@@ -352,7 +387,7 @@
   if (!WriteBuffer(fd, kProfileMagic, sizeof(kProfileMagic))) {
     return false;
   }
-  if (!WriteBuffer(fd, kProfileVersion, sizeof(kProfileVersion))) {
+  if (!WriteBuffer(fd, version_, sizeof(version_))) {
     return false;
   }
   DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
@@ -367,7 +402,17 @@
         sizeof(uint16_t) * dex_data.class_set.size() +
         methods_region_size +
         dex_data.bitmap_storage.size();
+    if (StoresAggregationCounters()) {
+      required_capacity += sizeof(uint16_t) +  // num class counters
+          sizeof(uint16_t) * dex_data.class_set.size() +
+          sizeof(uint16_t) +  // num method counters
+          sizeof(uint16_t) * dex_data_ptr->GetNumMethodCounters();
+    }
   }
+  if (StoresAggregationCounters()) {
+    required_capacity += sizeof(uint16_t);  // global counter
+  }
+
   // Allow large profiles for non target builds for the case where we are merging many profiles
   // to generate a boot image profile.
   if (kIsTargetBuild && required_capacity > kProfileSizeErrorThresholdInBytes) {
@@ -440,6 +485,24 @@
     buffer.insert(buffer.end(),
                   dex_data.bitmap_storage.begin(),
                   dex_data.bitmap_storage.end());
+
+    if (StoresAggregationCounters()) {
+      AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
+      for (const auto& class_id : dex_data.class_set) {
+        uint16_t type_idx = class_id.index_;
+        AddUintToBuffer(&buffer, dex_data.class_counters[type_idx]);
+      }
+      AddUintToBuffer(&buffer, dex_data.GetNumMethodCounters());
+      for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+        if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+          AddUintToBuffer(&buffer, dex_data.method_counters[method_idx]);
+        }
+      }
+    }
+  }
+
+  if (StoresAggregationCounters()) {
+    AddUintToBuffer(&buffer, aggregation_count_);
   }
 
   uint32_t output_size = 0;
@@ -580,7 +643,8 @@
         profile_key,
         checksum,
         profile_index,
-        num_method_ids);
+        num_method_ids,
+        StoresAggregationCounters());
     info_.push_back(dex_file_data);
   }
   DexFileData* result = info_[profile_index];
@@ -940,7 +1004,7 @@
   // Read magic and version
   const size_t kMagicVersionSize =
     sizeof(kProfileMagic) +
-    sizeof(kProfileVersion) +
+    kProfileVersionSize +
     sizeof(uint8_t) +  // number of dex files
     sizeof(uint32_t) +  // size of uncompressed profile data
     sizeof(uint32_t);  // size of compressed profile data
@@ -956,10 +1020,18 @@
     *error = "Profile missing magic";
     return kProfileLoadVersionMismatch;
   }
-  if (!safe_buffer.CompareAndAdvance(kProfileVersion, sizeof(kProfileVersion))) {
+  if (safe_buffer.CountUnreadBytes() < kProfileVersionSize) {
+    *error = "Cannot read profile version";
+    return kProfileLoadBadData;
+  }
+  memcpy(version_, safe_buffer.GetCurrentPtr(), kProfileVersionSize);
+  safe_buffer.Advance(kProfileVersionSize);
+  if ((memcmp(version_, kProfileVersion, kProfileVersionSize) != 0) &&
+      (memcmp(version_, kProfileVersionWithCounters, kProfileVersionSize) != 0)) {
     *error = "Profile version mismatch";
     return kProfileLoadVersionMismatch;
   }
+
   if (!safe_buffer.ReadUintAndAdvance<uint8_t>(number_of_dex_files)) {
     *error = "Cannot read the number of dex files";
     return kProfileLoadBadData;
@@ -1044,6 +1116,7 @@
     }
   }
 
+  // Read method bitmap.
   const size_t bytes = data->bitmap_storage.size();
   if (buffer.CountUnreadBytes() < bytes) {
     *error += "Profile EOF reached prematurely for ReadProfileHeaderDexLocation";
@@ -1052,10 +1125,51 @@
   const uint8_t* base_ptr = buffer.GetCurrentPtr();
   std::copy_n(base_ptr, bytes, data->bitmap_storage.data());
   buffer.Advance(bytes);
-  // Read method bitmap.
+
+  if (StoresAggregationCounters()) {
+    ReadAggregationCounters(buffer, *data, error);
+  }
+
   return kProfileLoadSuccess;
 }
 
+bool ProfileCompilationInfo::ReadAggregationCounters(
+      SafeBuffer& buffer,
+      DexFileData& dex_data,
+      /*out*/std::string* error) {
+  size_t unread_bytes_before_op = buffer.CountUnreadBytes();
+  size_t expected_byte_count = sizeof(uint16_t) *
+      (dex_data.class_set.size() + dex_data.method_map.size() + 2);
+  if (unread_bytes_before_op < expected_byte_count) {
+    *error += "Profile EOF reached prematurely for ReadAggregationCounters";
+    return false;
+  }
+
+  uint16_t num_class_counters;
+  READ_UINT(uint16_t, buffer, num_class_counters, error);
+  if (num_class_counters != dex_data.class_set.size()) {
+    *error = "Invalid class size when reading counters";
+    return false;
+  }
+  for (const auto& class_it : dex_data.class_set) {
+    READ_UINT(uint16_t, buffer, dex_data.class_counters[class_it.index_], error);
+  }
+
+  uint16_t num_method_counters;
+  READ_UINT(uint16_t, buffer, num_method_counters, error);
+  if (num_method_counters != dex_data.GetNumMethodCounters()) {
+    *error = "Invalid method size when reading counters";
+    return false;
+  }
+  for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+    if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+      READ_UINT(uint16_t, buffer, dex_data.method_counters[method_idx], error);
+    }
+  }
+
+  return true;
+}
+
 // TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
 // return a unique pointer to a ProfileCompilationInfo upon success.
 bool ProfileCompilationInfo::Load(
@@ -1194,7 +1308,8 @@
     }
 
     // TODO(calin) pass along file names to assist with debugging.
-    MemMap map = zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry, "profile file", error);
+    MemMap map = zip_entry->MapDirectlyOrExtract(
+        kDexMetadataProfileEntry, "profile file", error, alignof(ProfileSource));
 
     if (map.IsValid()) {
       source->reset(ProfileSource::Create(std::move(map)));
@@ -1366,9 +1481,17 @@
     }
   }
 
+  if (StoresAggregationCounters()) {
+    if (!uncompressed_data.ReadUintAndAdvance<uint16_t>(&aggregation_count_)) {
+      *error = "Cannot read the global aggregation count";
+      return kProfileLoadBadData;
+    }
+  }
+
   // Check that we read everything and that profiles don't contain junk data.
   if (uncompressed_data.CountUnreadBytes() > 0) {
-    *error = "Unexpected content in the profile file";
+    *error = "Unexpected content in the profile file: " +
+        std::to_string(uncompressed_data.CountUnreadBytes()) + " extra bytes";
     return kProfileLoadBadData;
   } else {
     return kProfileLoadSuccess;
@@ -1390,8 +1513,8 @@
     // verify_checksum is false because we want to differentiate between a missing dex data and
     // a mismatched checksum.
     const DexFileData* dex_data = FindDexData(other_profile_line_header.dex_location,
-                                              0u,
-                                              false /* verify_checksum */);
+                                              /* checksum= */ 0u,
+                                              /* verify_checksum= */ false);
     if ((dex_data != nullptr) && (dex_data->checksum != other_profile_line_header.checksum)) {
       LOG(WARNING) << "Checksum mismatch for dex " << other_profile_line_header.dex_location;
       return false;
@@ -1478,8 +1601,8 @@
     // verify_checksum is false because we want to differentiate between a missing dex data and
     // a mismatched checksum.
     const DexFileData* dex_data = FindDexData(other_dex_data->profile_key,
-                                              0u,
-                                              /* verify_checksum */ false);
+                                              /* checksum= */ 0u,
+                                              /* verify_checksum= */ false);
     if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
       LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
       return false;
@@ -1514,6 +1637,33 @@
                                                                  other_dex_data->checksum));
     DCHECK(dex_data != nullptr);
 
+    // Merge counters for methods and class. Must be done before we merge the bitmaps so that
+    // we can tell if the data is new or not.
+    if (StoresAggregationCounters()) {
+      // Class aggregation counters.
+      if (merge_classes) {
+        for (const dex::TypeIndex& type_idx : other_dex_data->class_set) {
+          uint16_t amount = other.StoresAggregationCounters()
+              ? other_dex_data->class_counters[type_idx.index_]
+              : (dex_data->ContainsClass(type_idx) ? 1 : 0);
+
+          dex_data->class_counters[type_idx.index_] =
+              IncrementAggregationCounter(dex_data->class_counters[type_idx.index_], amount);
+        }
+      }
+
+      // Method aggregation counters.
+      for (uint16_t method_idx = 0; method_idx < other_dex_data->num_method_ids; method_idx++) {
+        if (other_dex_data->GetHotnessInfo(method_idx).IsInProfile()) {
+          uint16_t amount = other.StoresAggregationCounters()
+              ? other_dex_data->method_counters[method_idx]
+              : (dex_data->GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0);
+          dex_data->method_counters[method_idx] =
+              IncrementAggregationCounter(dex_data->method_counters[method_idx], amount);
+        }
+      }
+    }
+
     // Merge the classes.
     if (merge_classes) {
       dex_data->class_set.insert(other_dex_data->class_set.begin(),
@@ -1548,6 +1698,13 @@
     // Merge the method bitmaps.
     dex_data->MergeBitmap(*other_dex_data);
   }
+
+  // Global aggregation counter.
+  if (StoresAggregationCounters()) {
+    uint16_t amount = other.StoresAggregationCounters() ? other.aggregation_count_ : 1;
+    aggregation_count_ = IncrementAggregationCounter(aggregation_count_, amount);
+  }
+
   return true;
 }
 
@@ -1610,11 +1767,7 @@
 
 bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
   const DexFileData* dex_data = FindDexData(&dex_file);
-  if (dex_data != nullptr) {
-    const ArenaSet<dex::TypeIndex>& classes = dex_data->class_set;
-    return classes.find(type_idx) != classes.end();
-  }
-  return false;
+  return (dex_data != nullptr) && dex_data->ContainsClass(type_idx);
 }
 
 uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
@@ -1633,25 +1786,7 @@
   return total;
 }
 
-// Produce a non-owning vector from a vector.
-template<typename T>
-const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
-  auto non_owning_vector = new std::vector<T*>();
-  for (auto& element : *owning_vector) {
-    non_owning_vector->push_back(element.get());
-  }
-  return non_owning_vector;
-}
-
-std::string ProfileCompilationInfo::DumpInfo(
-    const std::vector<std::unique_ptr<const DexFile>>* dex_files,
-    bool print_full_dex_location) const {
-  std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
-      MakeNonOwningVector(dex_files));
-  return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
-}
-
-std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
+std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>& dex_files,
                                              bool print_full_dex_location) const {
   std::ostringstream os;
   if (info_.empty()) {
@@ -1674,11 +1809,10 @@
     os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
     os << " [checksum=" << std::hex << dex_data->checksum << "]" << std::dec;
     const DexFile* dex_file = nullptr;
-    if (dex_files != nullptr) {
-      for (size_t i = 0; i < dex_files->size(); i++) {
-        if (dex_data->profile_key == (*dex_files)[i]->GetLocation()) {
-          dex_file = (*dex_files)[i];
-        }
+    for (const DexFile* current : dex_files) {
+      if (dex_data->profile_key == current->GetLocation() &&
+          dex_data->checksum == current->GetLocationChecksum()) {
+        dex_file = current;
       }
     }
     os << "\n\thot methods: ";
@@ -1768,6 +1902,9 @@
 bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
   // No need to compare profile_key_map_. That's only a cache for fast search.
   // All the information is already in the info_ vector.
+  if (memcmp(version_, other.version_, kProfileVersionSize) != 0) {
+    return false;
+  }
   if (info_.size() != other.info_.size()) {
     return false;
   }
@@ -1778,6 +1915,9 @@
       return false;
     }
   }
+  if (aggregation_count_ != other.aggregation_count_) {
+    return false;
+  }
   return true;
 }
 
@@ -1845,7 +1985,7 @@
       flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
       info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
                           profile_key,
-                          /*method_idx*/ 0,
+                          /*checksum=*/ 0,
                           method_idx,
                           max_method);
     }
@@ -1872,43 +2012,42 @@
     uint16_t method_percentage,
     uint16_t class_percentage,
     uint32_t random_seed) {
-  std::srand(random_seed);
   ProfileCompilationInfo info;
+  std::default_random_engine rng(random_seed);
+  auto create_shuffled_range = [&rng](uint32_t take, uint32_t out_of) {
+    CHECK_LE(take, out_of);
+    std::vector<uint32_t> vec(out_of);
+    std::iota(vec.begin(), vec.end(), 0u);
+    std::shuffle(vec.begin(), vec.end(), rng);
+    vec.erase(vec.begin() + take, vec.end());
+    std::sort(vec.begin(), vec.end());
+    return vec;
+  };
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
     const std::string& location = dex_file->GetLocation();
     uint32_t checksum = dex_file->GetLocationChecksum();
 
     uint32_t number_of_classes = dex_file->NumClassDefs();
     uint32_t classes_required_in_profile = (number_of_classes * class_percentage) / 100;
-    uint32_t class_start_index = rand() % number_of_classes;
-    for (uint32_t i = 0; i < number_of_classes && classes_required_in_profile; ++i) {
-      if (number_of_classes - i == classes_required_in_profile ||
-          std::rand() % (number_of_classes - i - classes_required_in_profile) == 0) {
-        uint32_t class_index = (i + class_start_index) % number_of_classes;
-        info.AddClassIndex(location,
-                           checksum,
-                           dex_file->GetClassDef(class_index).class_idx_,
-                           dex_file->NumMethodIds());
-        classes_required_in_profile--;
-      }
+    for (uint32_t class_index : create_shuffled_range(classes_required_in_profile,
+                                                      number_of_classes)) {
+      info.AddClassIndex(location,
+                         checksum,
+                         dex_file->GetClassDef(class_index).class_idx_,
+                         dex_file->NumMethodIds());
     }
 
     uint32_t number_of_methods = dex_file->NumMethodIds();
     uint32_t methods_required_in_profile = (number_of_methods * method_percentage) / 100;
-    uint32_t method_start_index = rand() % number_of_methods;
-    for (uint32_t i = 0; i < number_of_methods && methods_required_in_profile; ++i) {
-      if (number_of_methods - i == methods_required_in_profile ||
-          std::rand() % (number_of_methods - i - methods_required_in_profile) == 0) {
-        uint32_t method_index = (method_start_index + i) % number_of_methods;
-        // Alternate between startup and post startup.
-        uint32_t flags = MethodHotness::kFlagHot;
-        flags |= ((method_index & 1) != 0)
-            ? MethodHotness::kFlagPostStartup
-            : MethodHotness::kFlagStartup;
-        info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
-                            MethodReference(dex_file.get(), method_index));
-        methods_required_in_profile--;
-      }
+    for (uint32_t method_index : create_shuffled_range(methods_required_in_profile,
+                                                       number_of_methods)) {
+      // Alternate between startup and post startup.
+      uint32_t flags = MethodHotness::kFlagHot;
+      flags |= ((method_index & 1) != 0)
+                   ? MethodHotness::kFlagPostStartup
+                   : MethodHotness::kFlagStartup;
+      info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
+                          MethodReference(dex_file.get(), method_index));
     }
   }
   return info.Save(fd);
@@ -1981,9 +2120,8 @@
   SetMethodHotness(index, flags);
 
   if ((flags & MethodHotness::kFlagHot) != 0) {
-    method_map.FindOrAdd(
-        index,
-        InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
+    ProfileCompilationInfo::InlineCacheMap* result = FindOrAddMethod(index);
+    DCHECK(result != nullptr);
   }
   return true;
 }
@@ -1992,20 +2130,20 @@
                                                            MethodHotness::Flag flags) {
   DCHECK_LT(index, num_method_ids);
   if ((flags & MethodHotness::kFlagStartup) != 0) {
-    method_bitmap.StoreBit(MethodBitIndex(/*startup*/ true, index), /*value*/ true);
+    method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ true, index), /*value=*/ true);
   }
   if ((flags & MethodHotness::kFlagPostStartup) != 0) {
-    method_bitmap.StoreBit(MethodBitIndex(/*startup*/ false, index), /*value*/ true);
+    method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ false, index), /*value=*/ true);
   }
 }
 
 ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHotnessInfo(
     uint32_t dex_method_index) const {
   MethodHotness ret;
-  if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ true, dex_method_index))) {
+  if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ true, dex_method_index))) {
     ret.AddFlag(MethodHotness::kFlagStartup);
   }
-  if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ false, dex_method_index))) {
+  if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ false, dex_method_index))) {
     ret.AddFlag(MethodHotness::kFlagPostStartup);
   }
   auto it = method_map.find(dex_method_index);
@@ -2016,6 +2154,43 @@
   return ret;
 }
 
+int32_t ProfileCompilationInfo::DexFileData::GetMethodAggregationCounter(
+      uint16_t method_idx) const {
+  CHECK_GT(method_counters.size(), method_idx) << "Profile not prepared for aggregation counters";
+  if (!GetHotnessInfo(method_idx).IsInProfile()) {
+    return -1;
+  }
+
+  return method_counters[method_idx];
+}
+
+int32_t ProfileCompilationInfo::DexFileData::GetClassAggregationCounter(uint16_t type_idx) const {
+  CHECK_GT(class_counters.size(), type_idx) << "Profile not prepared for aggregation counters";
+  if (!ContainsClass(dex::TypeIndex(type_idx))) {
+    return -1;
+  }
+
+  return class_counters[type_idx];
+}
+
+int32_t ProfileCompilationInfo::GetMethodAggregationCounter(
+      const MethodReference& method_ref) const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+  return dex_data == nullptr ? -1 : dex_data->GetMethodAggregationCounter(method_ref.index);
+}
+
+int32_t ProfileCompilationInfo::GetClassAggregationCounter(const TypeReference& type_ref) const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  const DexFileData* dex_data = FindDexData(type_ref.dex_file);
+  return dex_data == nullptr ? -1 : dex_data->GetClassAggregationCounter(type_ref.index);
+}
+
+uint16_t ProfileCompilationInfo::GetAggregationCounter() const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  return aggregation_count_;
+}
+
 ProfileCompilationInfo::DexPcData*
 ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
   return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
@@ -2034,7 +2209,7 @@
               << type_idx.index_ << " in dex " << dex_file->GetLocation();
           return HashSet<std::string>();
         }
-        const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
+        const dex::TypeId& type_id = dex_file->GetTypeId(type_idx);
         ret.insert(dex_file->GetTypeDescriptor(type_id));
       }
     } else {
@@ -2112,4 +2287,46 @@
   profile_key_map_.clear();
 }
 
+bool ProfileCompilationInfo::StoresAggregationCounters() const {
+  return memcmp(version_, kProfileVersionWithCounters, sizeof(kProfileVersionWithCounters)) == 0;
+}
+
+void ProfileCompilationInfo::PrepareForAggregationCounters() {
+  InitProfileVersionInternal(kProfileVersionWithCounters);
+  for (DexFileData* dex_data : info_) {
+    dex_data->PrepareForAggregationCounters();
+  }
+}
+
+void ProfileCompilationInfo::DexFileData::PrepareForAggregationCounters() {
+  method_counters.resize(num_method_ids);
+  // TODO(calin): we should store the maximum number of types in the profile.
+  // It will simplify quite a few things and make this storage allocation
+  // more efficient.
+  size_t max_elems = 1 << (kBitsPerByte * sizeof(uint16_t));
+  class_counters.resize(max_elems);
+}
+
+const uint8_t* ProfileCompilationInfo::GetVersion() const {
+  return version_;
+}
+
+void ProfileCompilationInfo::InitProfileVersionInternal(const uint8_t version[]) {
+  CHECK(
+      (memcmp(version, kProfileVersion, kProfileVersionSize) == 0) ||
+      (memcmp(version, kProfileVersionWithCounters, kProfileVersionSize) == 0));
+  memcpy(version_, version, kProfileVersionSize);
+}
+
+uint16_t ProfileCompilationInfo::DexFileData::GetNumMethodCounters() const {
+  uint16_t num_method_counters = 0;
+  for (uint16_t method_idx = 0; method_idx < num_method_ids; method_idx++) {
+    num_method_counters += GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0;
+  }
+  return num_method_counters;
+}
+
+bool ProfileCompilationInfo::DexFileData::ContainsClass(const dex::TypeIndex type_index) const {
+  return class_set.find(type_index) != class_set.end();
+}
 }  // namespace art
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 0dbf490..fa4615b 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -73,9 +73,10 @@
  public:
   static const uint8_t kProfileMagic[];
   static const uint8_t kProfileVersion[];
-
+  static const uint8_t kProfileVersionWithCounters[];
   static const char kDexMetadataProfileEntry[];
 
+  static constexpr size_t kProfileVersionSize = 4;
   static constexpr uint8_t kIndividualInlineCacheSize = 5;
 
   // Data structures for encoding the offline representation of inline caches.
@@ -377,12 +378,10 @@
                                                       uint16_t dex_method_index) const;
 
   // Dump all the loaded profile info into a string and returns it.
-  // If dex_files is not null then the method indices will be resolved to their
+  // If dex_files is not empty then the method indices will be resolved to their
   // names.
   // This is intended for testing and debugging.
-  std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
-                       bool print_full_dex_location = true) const;
-  std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
+  std::string DumpInfo(const std::vector<const DexFile*>& dex_files,
                        bool print_full_dex_location = true) const;
 
   // Return the classes and methods for a given dex file through out args. The out args are the set
@@ -449,6 +448,30 @@
   // Clears all the data from the profile.
   void ClearData();
 
+  // Prepare the profile to store aggregation counters.
+  // This will change the profile version and allocate extra storage for the counters.
+  // It allocates 2 bytes for every possible method and class, so do not use in performance
+  // critical code which needs to be memory efficient.
+  void PrepareForAggregationCounters();
+
+  // Returns true if the profile is configured to store aggregation counters.
+  bool StoresAggregationCounters() const;
+
+  // Returns the aggregation counter for the given method.
+  // Returns -1 if the method is not in the profile.
+  // CHECKs that the profile is configured to store aggregation counters.
+  int32_t GetMethodAggregationCounter(const MethodReference& method_ref) const;
+  // Returns the aggregation counter for the given class.
+  // Returns -1 if the class is not in the profile.
+  // CHECKs that the profile is configured to store aggregation counters.
+  int32_t GetClassAggregationCounter(const TypeReference& type_ref) const;
+  // Returns the number of times the profile was merged.
+  // CHECKs that the profile is configured to store aggregation counters.
+  uint16_t GetAggregationCounter() const;
+
+  // Return the version of this profile.
+  const uint8_t* GetVersion() const;
+
  private:
   enum ProfileLoadStatus {
     kProfileLoadWouldOverwiteData,
@@ -472,7 +495,8 @@
                 const std::string& key,
                 uint32_t location_checksum,
                 uint16_t index,
-                uint32_t num_methods)
+                uint32_t num_methods,
+                bool store_aggregation_counters)
         : allocator_(allocator),
           profile_key(key),
           profile_index(index),
@@ -480,13 +504,18 @@
           method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
           class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
           num_method_ids(num_methods),
-          bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
+          bitmap_storage(allocator->Adapter(kArenaAllocProfile)),
+          method_counters(allocator->Adapter(kArenaAllocProfile)),
+          class_counters(allocator->Adapter(kArenaAllocProfile)) {
       bitmap_storage.resize(ComputeBitmapStorage(num_method_ids));
       if (!bitmap_storage.empty()) {
         method_bitmap =
             BitMemoryRegion(MemoryRegion(
                 &bitmap_storage[0], bitmap_storage.size()), 0, ComputeBitmapBits(num_method_ids));
       }
+      if (store_aggregation_counters) {
+        PrepareForAggregationCounters();
+      }
     }
 
     static size_t ComputeBitmapBits(uint32_t num_method_ids) {
@@ -497,7 +526,13 @@
     }
 
     bool operator==(const DexFileData& other) const {
-      return checksum == other.checksum && method_map == other.method_map;
+      return checksum == other.checksum &&
+          num_method_ids == other.num_method_ids &&
+          method_map == other.method_map &&
+          class_set == other.class_set &&
+          (BitMemoryRegion::Compare(method_bitmap, other.method_bitmap) == 0) &&
+          class_counters == other.class_counters &&
+          method_counters == other.method_counters;
     }
 
     // Mark a method as executed at least once.
@@ -512,6 +547,14 @@
 
     void SetMethodHotness(size_t index, MethodHotness::Flag flags);
     MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
+    void PrepareForAggregationCounters();
+
+    int32_t GetMethodAggregationCounter(uint16_t method_index) const;
+    int32_t GetClassAggregationCounter(uint16_t type_index) const;
+
+    uint16_t GetNumMethodCounters() const;
+
+    bool ContainsClass(const dex::TypeIndex type_index) const;
 
     // The allocator used to allocate new inline cache maps.
     ArenaAllocator* const allocator_;
@@ -521,7 +564,7 @@
     uint8_t profile_index;
     // The dex checksum.
     uint32_t checksum;
-    // The methonds' profile information.
+    // The methods' profile information.
     MethodMap method_map;
     // The classes which have been profiled. Note that these don't necessarily include
     // all the classes that can be found in the inline caches reference.
@@ -533,6 +576,8 @@
     uint32_t num_method_ids;
     ArenaVector<uint8_t> bitmap_storage;
     BitMemoryRegion method_bitmap;
+    ArenaVector<uint16_t> method_counters;
+    ArenaVector<uint16_t> class_counters;
 
    private:
     enum BitmapIndex {
@@ -763,6 +808,11 @@
                    const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
                    /*out*/std::string* error);
 
+  // Read the aggregation counters from the buffer.
+  bool ReadAggregationCounters(SafeBuffer& buffer,
+                               DexFileData& dex_data,
+                               /*out*/std::string* error);
+
   // The method generates mapping of profile indices while merging a new profile
   // data into current data. It returns true, if the mapping was successful.
   bool RemapProfileIndex(const std::vector<ProfileLineHeader>& profile_line_headers,
@@ -794,6 +844,9 @@
   // if no previous data exists.
   DexPcData* FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc);
 
+  // Initializes the profile version to the desired one.
+  void InitProfileVersionInternal(const uint8_t version[]);
+
   friend class ProfileCompilationInfoTest;
   friend class CompilerDriverProfileTest;
   friend class ProfileAssistantTest;
@@ -811,6 +864,14 @@
   // This is used to speed up searches since it avoids iterating
   // over the info_ vector when searching by profile key.
   ArenaSafeMap<const std::string, uint8_t> profile_key_map_;
+
+  // The version of the profile.
+  // This may change if a "normal" profile is transformed to keep track
+  // of aggregation counters.
+  uint8_t version_[kProfileVersionSize];
+
+  // Stored only when the profile is configured to keep track of aggregation counters.
+  uint16_t aggregation_count_;
 };
 
 }  // namespace art
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 417abaa..47019c4 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -43,22 +43,22 @@
  protected:
   bool AddMethod(const std::string& dex_location,
                  uint32_t checksum,
-                 uint16_t method_index,
+                 uint16_t method_idx,
                  ProfileCompilationInfo* info) {
     return info->AddMethodIndex(Hotness::kFlagHot,
                                 dex_location,
                                 checksum,
-                                method_index,
+                                method_idx,
                                 kMaxMethodIds);
   }
 
   bool AddMethod(const std::string& dex_location,
                  uint32_t checksum,
-                 uint16_t method_index,
+                 uint16_t method_idx,
                  const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
                  ProfileCompilationInfo* info) {
     return info->AddMethod(
-        dex_location, checksum, method_index, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
+        dex_location, checksum, method_idx, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
   }
 
   bool AddClass(const std::string& dex_location,
@@ -115,9 +115,9 @@
 
     ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
 
-    pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
-    pmi.dex_references.emplace_back("dex_location2", /* checksum */2, kMaxMethodIds);
-    pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+    pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+    pmi.dex_references.emplace_back("dex_location2", /* checksum= */2, kMaxMethodIds);
+    pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
 
     return pmi;
   }
@@ -148,8 +148,8 @@
     ScratchFile profile;
     ProfileCompilationInfo saved_info;
     for (uint16_t i = 0; i < 10; i++) {
-      ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
-      ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+      ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+      ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
     }
     ASSERT_TRUE(saved_info.Save(GetFd(profile)));
     ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -207,8 +207,8 @@
   ProfileCompilationInfo saved_info;
   // Save a few methods.
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
   }
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -221,9 +221,9 @@
 
   // Save more methods.
   for (uint16_t i = 0; i < 100; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location3", /* checksum */ 3, /* method_idx */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location3", /* checksum= */ 3, /* method_idx= */ i, &saved_info));
   }
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -240,19 +240,19 @@
   ScratchFile profile;
 
   ProfileCompilationInfo info;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info));
+  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info));
   // Trying to add info for an existing file but with a different checksum.
-  ASSERT_FALSE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info));
+  ASSERT_FALSE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info));
 }
 
 TEST_F(ProfileCompilationInfoTest, MergeFail) {
   ScratchFile profile;
 
   ProfileCompilationInfo info1;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
   // Use the same file, change the checksum.
   ProfileCompilationInfo info2;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
 
   ASSERT_FALSE(info1.MergeWith(info2));
 }
@@ -262,10 +262,10 @@
   ScratchFile profile;
 
   ProfileCompilationInfo info1;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
   // Use the same file, change the checksum.
   ProfileCompilationInfo info2;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
 
   ASSERT_TRUE(info1.Save(profile.GetFd()));
   ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -280,13 +280,13 @@
   ProfileCompilationInfo saved_info;
   // Save the maximum number of methods
   for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
   }
   // Save the maximum number of classes
   for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
-    ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -390,7 +390,7 @@
   ProfileCompilationInfo saved_info;
   // Save the maximum number of methods
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
   }
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
 
@@ -415,9 +415,9 @@
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the
     // class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -431,11 +431,11 @@
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
   ASSERT_TRUE(loaded_pmi1 != nullptr);
   ASSERT_TRUE(*loaded_pmi1 == pmi);
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-      loaded_info.GetMethod("dex_location4", /* checksum */ 4, /* method_idx */ 3);
+      loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, /* dex_method_index= */ 3);
   ASSERT_TRUE(loaded_pmi2 != nullptr);
   ASSERT_TRUE(*loaded_pmi2 == pmi);
 }
@@ -448,7 +448,7 @@
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -459,7 +459,7 @@
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
   MakeMegamorphic(&pmi_extra);
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
   }
 
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -477,7 +477,7 @@
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
 
   ASSERT_TRUE(loaded_pmi1 != nullptr);
   ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
@@ -491,7 +491,7 @@
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -502,7 +502,7 @@
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
   MakeMegamorphic(&pmi_extra);
   for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
   }
 
   // Mark all inline caches with missing types and add them to the profile again.
@@ -510,7 +510,7 @@
   ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
   SetIsMissingTypes(&missing_types);
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
   }
 
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -528,7 +528,7 @@
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
   ASSERT_TRUE(loaded_pmi1 != nullptr);
   ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
 }
@@ -542,8 +542,8 @@
   // Modify the checksum to trigger a mismatch.
   pmi2.dex_references[0].dex_checksum++;
 
-  ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /*method_idx*/ 0, pmi1, &info));
-  ASSERT_FALSE(AddMethod("dex_location2", /* checksum */ 2, /*method_idx*/ 0, pmi2, &info));
+  ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /*method_idx=*/ 0, pmi1, &info));
+  ASSERT_FALSE(AddMethod("dex_location2", /* checksum= */ 2, /*method_idx=*/ 0, pmi2, &info));
 }
 
 // Verify that profiles behave correctly even if the methods are added in a different
@@ -556,8 +556,8 @@
 
   ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
-  pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
+  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
+  pmi.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
     ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
     dex_pc_data.AddClass(0, dex::TypeIndex(0));
@@ -567,8 +567,8 @@
 
   ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
-  pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
-  pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+  pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
+  pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
     ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
     dex_pc_data.AddClass(1, dex::TypeIndex(0));
@@ -579,15 +579,15 @@
   // Profile 1 and Profile 2 get the same methods but in different order.
   // This will trigger a different dex numbers.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &info));
   }
 
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     ASSERT_TRUE(AddMethod(
-      "dex_location2", /* checksum */ 2, method_idx, pmi_reindexed, &info_reindexed));
+      "dex_location2", /* checksum= */ 2, method_idx, pmi_reindexed, &info_reindexed));
     ASSERT_TRUE(AddMethod(
-      "dex_location1", /* checksum */ 1, method_idx, pmi_reindexed, &info_reindexed));
+      "dex_location1", /* checksum= */ 1, method_idx, pmi_reindexed, &info_reindexed));
   }
 
   ProfileCompilationInfo info_backup;
@@ -597,11 +597,11 @@
   ASSERT_TRUE(info.Equals(info_backup));
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-        info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+        info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
     ASSERT_TRUE(*loaded_pmi1 == pmi);
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-        info.GetMethod("dex_location2", /* checksum */ 2, method_idx);
+        info.GetMethod("dex_location2", /* dex_checksum= */ 2, method_idx);
     ASSERT_TRUE(loaded_pmi2 != nullptr);
     ASSERT_TRUE(*loaded_pmi2 == pmi);
   }
@@ -612,34 +612,34 @@
   // Save a few methods.
   for (uint16_t i = 0; i < std::numeric_limits<uint8_t>::max(); i++) {
     std::string dex_location = std::to_string(i);
-    ASSERT_TRUE(AddMethod(dex_location, /* checksum */ 1, /* method_idx */ i, &info));
+    ASSERT_TRUE(AddMethod(dex_location, /* checksum= */ 1, /* method_idx= */ i, &info));
   }
   // We only support at most 255 dex files.
   ASSERT_FALSE(AddMethod(
-      /*dex_location*/ "256", /* checksum */ 1, /* method_idx */ 0, &info));
+      /*dex_location=*/ "256", /* checksum= */ 1, /* method_idx= */ 0, &info));
 }
 
 TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
   // Create a megamorphic inline cache.
   ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
   ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
   dex_pc_data.SetIsMegamorphic();
   ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
 
   ProfileCompilationInfo info_megamorphic;
   ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum*/ 1,
-                        /*method_idx*/ 0,
+                        /*checksum=*/ 1,
+                        /*method_idx=*/ 0,
                         pmi,
                         &info_megamorphic));
 
   // Create a profile with no inline caches (for the same method).
   ProfileCompilationInfo info_no_inline_cache;
   ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum*/ 1,
-                        /*method_idx*/ 0,
+                        /*checksum=*/ 1,
+                        /*method_idx=*/ 0,
                         &info_no_inline_cache));
 
   // Merge the megamorphic cache into the empty one.
@@ -653,23 +653,23 @@
   // Create an inline cache with missing types
   ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
   ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
   dex_pc_data.SetIsMissingTypes();
   ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
 
   ProfileCompilationInfo info_megamorphic;
   ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum*/ 1,
-                        /*method_idx*/ 0,
+                        /*checksum=*/ 1,
+                        /*method_idx=*/ 0,
                         pmi,
                         &info_megamorphic));
 
   // Create a profile with no inline caches (for the same method).
   ProfileCompilationInfo info_no_inline_cache;
   ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum*/ 1,
-                        /*method_idx*/ 0,
+                        /*checksum=*/ 1,
+                        /*method_idx=*/ 0,
                         &info_no_inline_cache));
 
   // Merge the missing type cache into the empty one.
@@ -766,26 +766,26 @@
 TEST_F(ProfileCompilationInfoTest, LoadFromZipCompress) {
   TestProfileLoadFromZip("primary.prof",
                          ZipWriter::kCompress | ZipWriter::kAlign32,
-                         /*should_succeed*/true);
+                         /*should_succeed=*/true);
 }
 
 TEST_F(ProfileCompilationInfoTest, LoadFromZipUnCompress) {
   TestProfileLoadFromZip("primary.prof",
                          ZipWriter::kAlign32,
-                         /*should_succeed*/true);
+                         /*should_succeed=*/true);
 }
 
 TEST_F(ProfileCompilationInfoTest, LoadFromZipUnAligned) {
   TestProfileLoadFromZip("primary.prof",
                          0,
-                         /*should_succeed*/true);
+                         /*should_succeed=*/true);
 }
 
 TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadZipEntry) {
   TestProfileLoadFromZip("invalid.profile.entry",
                          0,
-                         /*should_succeed*/true,
-                         /*should_succeed_with_empty_profile*/true);
+                         /*should_succeed=*/true,
+                         /*should_succeed_with_empty_profile=*/true);
 }
 
 TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadProfile) {
@@ -835,7 +835,7 @@
     info.AddMethodIndex(Hotness::kFlagHot,
                         old_name,
                         dex->GetLocationChecksum(),
-                        /* method_idx */ 0,
+                        /* method_idx= */ 0,
                         dex->NumMethodIds());
   }
 
@@ -845,7 +845,7 @@
   // Verify that we find the methods when searched with the original dex files.
   for (const std::unique_ptr<const DexFile>& dex : dex_files) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
     ASSERT_TRUE(loaded_pmi != nullptr);
   }
 }
@@ -856,9 +856,9 @@
   ProfileCompilationInfo info;
   info.AddMethodIndex(Hotness::kFlagHot,
                       "my.app",
-                      /* checksum */ 123,
-                      /* method_idx */ 0,
-                      /* num_method_ids */ 10);
+                      /* checksum= */ 123,
+                      /* method_idx= */ 0,
+                      /* num_method_ids= */ 10);
 
   // Update the profile keys based on the original dex files
   ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
@@ -867,13 +867,13 @@
   // location.
   for (const std::unique_ptr<const DexFile>& dex : dex_files) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
     ASSERT_TRUE(loaded_pmi == nullptr);
   }
 
   // Verify that we can find the original entry.
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod("my.app", /* checksum */ 123, /* method_idx */ 0);
+        info.GetMethod("my.app", /* dex_checksum= */ 123, /* dex_method_index= */ 0);
   ASSERT_TRUE(loaded_pmi != nullptr);
 }
 
@@ -892,7 +892,7 @@
     info.AddMethodIndex(Hotness::kFlagHot,
                         old_name,
                         dex->GetLocationChecksum(),
-                        /* method_idx */ 0,
+                        /* method_idx= */ 0,
                         dex->NumMethodIds());
   }
 
@@ -900,8 +900,8 @@
   // This will cause the rename to fail because an existing entry would already have that name.
   info.AddMethodIndex(Hotness::kFlagHot,
                       dex_files[0]->GetLocation(),
-                      /* checksum */ 123,
-                      /* method_idx */ 0,
+                      /* checksum= */ 123,
+                      /* method_idx= */ 0,
                       dex_files[0]->NumMethodIds());
 
   ASSERT_FALSE(info.UpdateProfileKeys(dex_files));
@@ -916,10 +916,10 @@
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -941,8 +941,12 @@
 
   // Dex location 2 and 4 should have been filtered out
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2", /* checksum */ 2, method_idx));
-    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx));
+    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2",
+                                                 /* dex_checksum= */ 2,
+                                                 method_idx));
+    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4",
+                                                 /* dex_checksum= */ 4,
+                                                 method_idx));
   }
 
   // Dex location 1 should have all all the inline caches referencing dex location 2 set to
@@ -950,7 +954,7 @@
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // The methods for dex location 1 should be in the profile data.
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ method_idx);
+        loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
 
     // Verify the inline cache.
@@ -989,8 +993,8 @@
     ProfileCompilationInfo::OfflineProfileMethodInfo expected_pmi(ic_map);
 
     // The dex references should not have  dex_location2 in the list.
-    expected_pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
-    expected_pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+    expected_pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+    expected_pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
 
     // Now check that we get back what we expect.
     ASSERT_TRUE(*loaded_pmi1 == expected_pmi);
@@ -1006,10 +1010,10 @@
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1038,9 +1042,9 @@
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the
     // class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1060,13 +1064,13 @@
 
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-        loaded_info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+        loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
     ASSERT_TRUE(*loaded_pmi1 == pmi);
   }
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-        loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx);
+        loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, method_idx);
     ASSERT_TRUE(loaded_pmi2 != nullptr);
     ASSERT_TRUE(*loaded_pmi2 == pmi);
   }
@@ -1081,8 +1085,8 @@
   ProfileCompilationInfo saved_info;
   uint16_t item_count = 1000;
   for (uint16_t i = 0; i < item_count; i++) {
-    ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1101,7 +1105,7 @@
   // Compute the expectation.
   ProfileCompilationInfo expected_info;
   for (uint16_t i = 0; i < item_count; i++) {
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &expected_info));
+    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &expected_info));
   }
 
   // Validate the expectation.
@@ -1112,7 +1116,7 @@
 TEST_F(ProfileCompilationInfoTest, ClearData) {
   ProfileCompilationInfo info;
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
   }
   ASSERT_FALSE(IsEmpty(info));
   info.ClearData();
@@ -1122,7 +1126,7 @@
 TEST_F(ProfileCompilationInfoTest, ClearDataAndSave) {
   ProfileCompilationInfo info;
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
   }
   info.ClearData();
 
@@ -1137,4 +1141,180 @@
   ASSERT_TRUE(loaded_info.Equals(info));
 }
 
+TEST_F(ProfileCompilationInfoTest, PrepareForAggregationCounters) {
+  ProfileCompilationInfo info;
+  ASSERT_EQ(
+      memcmp(info.GetVersion(),
+             ProfileCompilationInfo::kProfileVersion,
+             ProfileCompilationInfo::kProfileVersionSize),
+      0);
+
+  info.PrepareForAggregationCounters();
+
+  ASSERT_EQ(
+      memcmp(info.GetVersion(),
+             ProfileCompilationInfo::kProfileVersionWithCounters,
+             ProfileCompilationInfo::kProfileVersionSize),
+      0);
+  ASSERT_TRUE(info.StoresAggregationCounters());
+  ASSERT_EQ(info.GetAggregationCounter(), 0);
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeWithAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+  ProfileCompilationInfo info3;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+  AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(30), &info1);
+  AddClass(location, checksum, dex::TypeIndex(30), &info2);
+  AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+  ASSERT_EQ(info1.GetAggregationCounter(), 0);
+  info1.MergeWith(info2);
+  ASSERT_EQ(info1.GetAggregationCounter(), 1);
+  info1.MergeWith(info3);
+  ASSERT_EQ(info1.GetAggregationCounter(), 2);
+
+  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+  ASSERT_EQ(2, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+  ASSERT_EQ(2, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+
+  // Check a method and a class that do not exist.
+  ASSERT_EQ(-1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 4)));
+  ASSERT_EQ(-1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(40))));
+}
+
+TEST_F(ProfileCompilationInfoTest, SaveAndLoadAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+  ProfileCompilationInfo info3;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+  AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(30), &info1);
+  AddClass(location, checksum, dex::TypeIndex(30), &info2);
+  AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+  info1.MergeWith(info2);
+  info1.MergeWith(info3);
+
+  ScratchFile profile;
+
+  ASSERT_TRUE(info1.Save(GetFd(profile)));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+
+  // Check that we get back what we saved.
+  ProfileCompilationInfo loaded_info;
+  loaded_info.PrepareForAggregationCounters();
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  ASSERT_TRUE(loaded_info.Equals(info1));
+
+  ASSERT_EQ(2, loaded_info.GetAggregationCounter());
+
+  ASSERT_EQ(0, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+  ASSERT_EQ(2, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+  ASSERT_EQ(0, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(1, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+  ASSERT_EQ(2, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeTwoWithAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+  AddClass(location, checksum, dex::TypeIndex(10), &info2);
+
+  info1.MergeWith(info2);
+  info1.MergeWith(info2);
+  ASSERT_EQ(2, info1.GetAggregationCounter());
+
+  // Save and load the profile to create a copy of the data.
+  ScratchFile profile;
+  info1.Save(GetFd(profile));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+
+  ProfileCompilationInfo loaded_info;
+  loaded_info.PrepareForAggregationCounters();
+  profile.GetFile()->ResetOffset();
+  loaded_info.Load(GetFd(profile));
+
+  // Merge the data
+  info1.MergeWith(loaded_info);
+
+  ASSERT_EQ(4, info1.GetAggregationCounter());
+
+  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(4, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+
+  ASSERT_EQ(4, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+}
+
 }  // namespace art
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 3cd8ae0..8849a7a 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -19,11 +19,6 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: ["oatdump.cc"],
-    target: {
-        android: {
-            shared_libs: ["libcutils"],
-        },
-    },
     // b/79417743, oatdump 32-bit tests failed with clang lld
     use_clang_lld: false,
     header_libs: [
@@ -37,6 +32,7 @@
     shared_libs: [
         "libart",
         "libart-compiler",
+        "libart-dexlayout",
         "libart-disassembler",
         "libdexfile",
         "libartbase",
@@ -54,6 +50,7 @@
     shared_libs: [
         "libartd",
         "libartd-compiler",
+        "libartd-dexlayout",
         "libartd-disassembler",
         "libdexfiled",
         "libartbased",
@@ -66,7 +63,9 @@
     name: "oatdumps-defaults",
     device_supported: false,
     static_executable: true,
-    defaults: ["oatdump-defaults"],
+    defaults: [
+        "oatdump-defaults",
+    ],
     target: {
         darwin: {
             enabled: false,
@@ -80,28 +79,39 @@
         // Try to get rid of it.
         "-z muldefs",
     ],
-    static_libs: art_static_dependencies,
+    static_libs: ["libsigchain_dummy"],
 }
 
 art_cc_binary {
     name: "oatdumps",
-    defaults: ["oatdumps-defaults"],
-    static_libs: [
-        "libart",
-        "libdexfile",
-        "libprofile",
-        "libartbase",
-        "libart-compiler",
-        "libart-disassembler",
-        "libvixl-arm",
-        "libvixl-arm64",
+    defaults: [
+        "libart_static_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+        "libprofile_static_defaults",
+        "libart-compiler_static_defaults",
+        "libart-dexlayout_static_defaults",
+        "oatdumps-defaults",
     ],
+    static_libs: [
+        "libart-disassembler",
+        "libvixl",
+    ],
+    // We need this to resolve libartpalette symbols
+    // correctly. Multiple source libraries depend on it.
+    group_static_libs: true,
 }
 
 art_cc_binary {
     name: "oatdumpds",
     defaults: [
         "art_debug_defaults",
+        "libartd_static_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+        "libprofiled_static_defaults",
+        "libartd-compiler_static_defaults",
+        "libartd-dexlayout_static_defaults",
         "oatdumps-defaults",
     ],
     target: {
@@ -110,15 +120,10 @@
         },
     },
     static_libs: [
-        "libartd",
-        "libdexfiled",
-        "libprofiled",
-        "libartbased",
-        "libartd-compiler",
         "libartd-disassembler",
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
     ],
+    group_static_libs: true,
 }
 
 art_cc_test {
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index e82cd97..b50aa1c 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -66,7 +66,12 @@
 .PHONY: dump-oat-boot-$(TARGET_ARCH)
 ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
 dump-oat-boot-$(TARGET_ARCH): $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
-	$(OATDUMP) $(addprefix --image=,$(DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
+	$(OATDUMP) \
+	  --runtime-arg \
+	  -Xbootclasspath:$(call normalize-path-list, $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES)) \
+	  --runtime-arg \
+	  -Xbootclasspath-locations:$(call normalize-path-list, $(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS)) \
+	  $(addprefix --image=,$(DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
 	  --output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt --instruction-set=$(TARGET_ARCH)
 	@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt
 endif
@@ -74,7 +79,12 @@
 ifdef TARGET_2ND_ARCH
 .PHONY: dump-oat-boot-$(TARGET_2ND_ARCH)
 dump-oat-boot-$(TARGET_2ND_ARCH): $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
-	$(OATDUMP) $(addprefix --image=,$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
+	$(OATDUMP) \
+	  --runtime-arg \
+	  -Xbootclasspath:$(call normalize-path-list, $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES)) \
+	  --runtime-arg \
+	  -Xbootclasspath-locations:$(call normalize-path-list, $(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS)) \
+	  $(addprefix --image=,$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
 	  --output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt --instruction-set=$(TARGET_2ND_ARCH)
 	@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt
 endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fc7f5b7..89826c6 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -28,6 +28,7 @@
 #include <vector>
 
 #include "android-base/logging.h"
+#include "android-base/parseint.h"
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
@@ -48,6 +49,7 @@
 #include "debug/debug_info.h"
 #include "debug/elf_debug_writer.h"
 #include "debug/method_debug_info.h"
+#include "dex/art_dex_file_loader.h"
 #include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/descriptors_names.h"
@@ -55,6 +57,7 @@
 #include "dex/dex_instruction-inl.h"
 #include "dex/string_reference.h"
 #include "dex/type_lookup_table.h"
+#include "dexlayout.h"
 #include "disassembler.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/space/image_space.h"
@@ -111,7 +114,7 @@
   "kOomeWhenThrowingOome",
   "kOomeWhenHandlingStackOverflow",
   "kNoClassDefFoundError",
-  "kClassLoader",
+  "kSpecialRoots",
 };
 
 // Map is so that we don't allocate multiple dex files for the same OatDexFile.
@@ -221,7 +224,7 @@
     debug::WriteDebugInfo(builder_.get(),
                           debug_info,
                           dwarf::DW_DEBUG_FRAME_FORMAT,
-                          true /* write_oat_patches */);
+                          /* write_oat_patches= */ true);
 
     builder_->End();
 
@@ -293,7 +296,7 @@
                      const DexFile& dex_file,
                      uint32_t class_def_index,
                      uint32_t dex_method_index,
-                     const DexFile::CodeItem* code_item,
+                     const dex::CodeItem* code_item,
                      uint32_t method_access_flags) {
     if ((method_access_flags & kAccAbstract) != 0) {
       // Abstract method, no code.
@@ -398,7 +401,7 @@
                                              options_.absolute_addresses_,
                                              oat_file.Begin(),
                                              oat_file.End(),
-                                             true /* can_read_literals_ */,
+                                             /* can_read_literals_= */ true,
                                              Is64BitInstructionSet(instruction_set_)
                                                  ? &Thread::DumpThreadOffset<PointerSize::k64>
                                                  : &Thread::DumpThreadOffset<PointerSize::k32>))) {
@@ -416,7 +419,7 @@
     return instruction_set_;
   }
 
-  typedef std::vector<std::unique_ptr<const DexFile>> DexFileUniqV;
+  using DexFileUniqV = std::vector<std::unique_ptr<const DexFile>>;
 
   bool Dump(std::ostream& os) {
     bool success = true;
@@ -470,9 +473,6 @@
                            GetQuickToInterpreterBridgeOffset);
 #undef DUMP_OAT_HEADER_OFFSET
 
-    os << "IMAGE FILE LOCATION OAT CHECKSUM:\n";
-    os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatChecksum());
-
     // Print the key-value store.
     {
       os << "KEY VALUE STORE:\n";
@@ -624,8 +624,61 @@
         const OatDexFile* oat_dex_file = oat_dex_files_[i];
         CHECK(oat_dex_file != nullptr);
         CHECK(vdex_dex_file != nullptr);
-        if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get())) {
-          success = false;
+
+        // If a CompactDex file is detected within a Vdex container, DexLayout is used to convert
+        // back to a StandardDex file. Since the converted DexFile will most likely not reproduce
+        // the original input Dex file, the `update_checksum_` option is used to recompute the
+        // checksum. If the vdex container does not contain cdex resources (`used_dexlayout` is
+        // false), ExportDexFile() enforces a reproducible checksum verification.
+        if (vdex_dex_file->IsCompactDexFile()) {
+          Options options;
+          options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+          options.update_checksum_ = true;
+          DexLayout dex_layout(options, /*info=*/ nullptr, /*out_file=*/ nullptr, /*header=*/ nullptr);
+          std::unique_ptr<art::DexContainer> dex_container;
+          bool result = dex_layout.ProcessDexFile(vdex_dex_file->GetLocation().c_str(),
+                                                  vdex_dex_file.get(),
+                                                  i,
+                                                  &dex_container,
+                                                  &error_msg);
+          if (!result) {
+            os << "DexLayout failed to process Dex file: " + error_msg;
+            success = false;
+            break;
+          }
+          DexContainer::Section* main_section = dex_container->GetMainSection();
+          CHECK_EQ(dex_container->GetDataSection()->Size(), 0u);
+
+          const ArtDexFileLoader dex_file_loader;
+          std::unique_ptr<const DexFile> dex(dex_file_loader.Open(
+              main_section->Begin(),
+              main_section->Size(),
+              vdex_dex_file->GetLocation(),
+              vdex_file->GetLocationChecksum(i),
+              /*oat_dex_file=*/ nullptr,
+              /*verify=*/ false,
+              /*verify_checksum=*/ true,
+              &error_msg));
+          if (dex == nullptr) {
+            os << "Failed to load DexFile from layout container: " + error_msg;
+            success = false;
+            break;
+          }
+          if (dex->IsCompactDexFile()) {
+            os <<"CompactDex conversion to StandardDex failed";
+            success = false;
+            break;
+          }
+
+          if (!ExportDexFile(os, *oat_dex_file, dex.get(), /*used_dexlayout=*/ true)) {
+            success = false;
+            break;
+          }
+        } else {
+          if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get(), /*used_dexlayout=*/ false)) {
+            success = false;
+            break;
+          }
         }
         i++;
       }
@@ -670,7 +723,7 @@
             << "': " << error_msg;
       } else {
         const char* descriptor = m->GetDeclaringClassDescriptor();
-        const DexFile::ClassDef* class_def =
+        const dex::ClassDef* class_def =
             OatDexFile::FindClassDef(*dex_file, descriptor, ComputeModifiedUtf8Hash(descriptor));
         if (class_def != nullptr) {
           uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def);
@@ -705,8 +758,8 @@
         PROT_READ | PROT_WRITE,
         MAP_PRIVATE,
         file->Fd(),
-        /* start offset */ 0,
-        /* low_4gb */ false,
+        /* start offset= */ 0,
+        /* low_4gb= */ false,
         vdex_filename.c_str(),
         error_msg);
     if (!mmap.IsValid()) {
@@ -727,7 +780,7 @@
     }
 
     vdex_file->Unquicken(MakeNonOwningPointerVector(tmp_dex_files),
-                         /* decompile_return_instruction */ true);
+                         /* decompile_return_instruction= */ true);
 
     *dex_files = std::move(tmp_dex_files);
     return vdex_file;
@@ -897,10 +950,16 @@
   // Dex resource is extracted from the oat_dex_file and its checksum is repaired since it's not
   // unquickened. Otherwise the dex_file has been fully unquickened and is expected to verify the
   // original checksum.
-  bool ExportDexFile(std::ostream& os, const OatDexFile& oat_dex_file, const DexFile* dex_file) {
+  bool ExportDexFile(std::ostream& os,
+                     const OatDexFile& oat_dex_file,
+                     const DexFile* dex_file,
+                     bool used_dexlayout) {
     std::string error_msg;
     std::string dex_file_location = oat_dex_file.GetDexFileLocation();
-    size_t fsize = oat_dex_file.FileSize();
+
+    // If dex_file (from unquicken or dexlayout) is not available, the output DexFile size is the
+    // same as the one extracted from the Oat container (pre-oreo)
+    size_t fsize = dex_file == nullptr ? oat_dex_file.FileSize() : dex_file->Size();
 
     // Some quick checks just in case
     if (fsize == 0 || fsize < sizeof(DexFile::Header)) {
@@ -920,27 +979,19 @@
       reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
           dex_file->CalculateChecksum();
     } else {
-      // Vdex unquicken output should match original input bytecode
-      uint32_t orig_checksum =
-          reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
-      CHECK_EQ(orig_checksum, dex_file->CalculateChecksum());
-      if (orig_checksum != dex_file->CalculateChecksum()) {
-        os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
-        return false;
+      // If dexlayout was used to convert CompactDex back to StandardDex, checksum will be updated
+      // due to `update_checksum_` option, otherwise we expect a reproducible checksum.
+      if (!used_dexlayout) {
+        // Vdex unquicken output should match original input bytecode
+        uint32_t orig_checksum =
+            reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
+        if (orig_checksum != dex_file->CalculateChecksum()) {
+          os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
+          return false;
+        }
       }
     }
 
-    // Update header for shared section.
-    uint32_t shared_section_offset = 0u;
-    uint32_t shared_section_size = 0u;
-    if (dex_file->IsCompactDexFile()) {
-      CompactDexFile::Header* const header =
-          reinterpret_cast<CompactDexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()));
-      shared_section_offset = header->data_off_;
-      shared_section_size = header->data_size_;
-      // The shared section will be serialized right after the dex file.
-      header->data_off_ = header->file_size_;
-    }
     // Verify output directory exists
     if (!OS::DirectoryExists(options_.export_dex_location_)) {
       // TODO: Extend OS::DirectoryExists if symlink support is required
@@ -994,15 +1045,6 @@
       return false;
     }
 
-    if (shared_section_size != 0) {
-      success = file->WriteFully(dex_file->Begin() + shared_section_offset, shared_section_size);
-      if (!success) {
-        os << "Failed to write shared data section";
-        file->Erase();
-        return false;
-      }
-    }
-
     if (file->FlushCloseOrErase() != 0) {
       os << "Flush and close failed";
       return false;
@@ -1030,7 +1072,7 @@
                          dex_file,
                          method.GetIndex(),
                          method.GetCodeItem(),
-                         method.GetRawAccessFlags(),
+                         method.GetAccessFlags(),
                          &addr_found)) {
         success = false;
       }
@@ -1050,12 +1092,12 @@
   static constexpr uint32_t kMaxCodeSize = 100 * 1000;
 
   bool DumpOatMethod(VariableIndentationOutputStream* vios,
-                     const DexFile::ClassDef& class_def,
+                     const dex::ClassDef& class_def,
                      uint32_t class_method_index,
                      const OatFile::OatClass& oat_class,
                      const DexFile& dex_file,
                      uint32_t dex_method_idx,
-                     const DexFile::CodeItem* code_item,
+                     const dex::CodeItem* code_item,
                      uint32_t method_access_flags,
                      bool* addr_found) {
     bool success = true;
@@ -1448,8 +1490,8 @@
                                          StackHandleScope<1>* hs,
                                          uint32_t dex_method_idx,
                                          const DexFile* dex_file,
-                                         const DexFile::ClassDef& class_def,
-                                         const DexFile::CodeItem* code_item,
+                                         const dex::ClassDef& class_def,
+                                         const dex::CodeItem* code_item,
                                          uint32_t method_access_flags) {
     if ((method_access_flags & kAccNative) == 0) {
       ScopedObjectAccess soa(Thread::Current());
@@ -1466,7 +1508,7 @@
       }
       return verifier::MethodVerifier::VerifyMethodAndDump(
           soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
-          class_def, code_item, method, method_access_flags, /* api_level */ 0);
+          class_def, code_item, method, method_access_flags, /* api_level= */ 0);
     }
 
     return nullptr;
@@ -1726,29 +1768,25 @@
 
     os << "IMAGE LOCATION: " << image_space_.GetImageLocation() << "\n\n";
 
-    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
+    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n";
+    os << "IMAGE SIZE: " << image_header_.GetImageSize() << "\n";
+    os << "IMAGE CHECKSUM: " << std::hex << image_header_.GetImageChecksum() << std::dec << "\n\n";
 
-    os << "IMAGE SIZE: " << image_header_.GetImageSize() << "\n\n";
+    os << "OAT CHECKSUM: " << StringPrintf("0x%08x\n\n", image_header_.GetOatChecksum()) << "\n";
+    os << "OAT FILE BEGIN:" << reinterpret_cast<void*>(image_header_.GetOatFileBegin()) << "\n";
+    os << "OAT DATA BEGIN:" << reinterpret_cast<void*>(image_header_.GetOatDataBegin()) << "\n";
+    os << "OAT DATA END:" << reinterpret_cast<void*>(image_header_.GetOatDataEnd()) << "\n";
+    os << "OAT FILE END:" << reinterpret_cast<void*>(image_header_.GetOatFileEnd()) << "\n\n";
+
+    os << "BOOT IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetBootImageBegin())
+        << "\n";
+    os << "BOOT IMAGE SIZE: " << image_header_.GetBootImageSize() << "\n\n";
 
     for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
       auto section = static_cast<ImageHeader::ImageSections>(i);
       os << "IMAGE SECTION " << section << ": " << image_header_.GetImageSection(section) << "\n\n";
     }
 
-    os << "OAT CHECKSUM: " << StringPrintf("0x%08x\n\n", image_header_.GetOatChecksum());
-
-    os << "OAT FILE BEGIN:" << reinterpret_cast<void*>(image_header_.GetOatFileBegin()) << "\n\n";
-
-    os << "OAT DATA BEGIN:" << reinterpret_cast<void*>(image_header_.GetOatDataBegin()) << "\n\n";
-
-    os << "OAT DATA END:" << reinterpret_cast<void*>(image_header_.GetOatDataEnd()) << "\n\n";
-
-    os << "OAT FILE END:" << reinterpret_cast<void*>(image_header_.GetOatFileEnd()) << "\n\n";
-
-    os << "PATCH DELTA:" << image_header_.GetPatchDelta() << "\n\n";
-
-    os << "COMPILE PIC: " << (image_header_.CompilePic() ? "yes" : "no") << "\n\n";
-
     {
       os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots().Ptr()) << "\n";
       static_assert(arraysize(image_roots_descriptions_) ==
@@ -1814,14 +1852,13 @@
       oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(oat_location);
     }
     if (oat_file == nullptr) {
-      oat_file = OatFile::Open(/* zip_fd */ -1,
+      oat_file = OatFile::Open(/*zip_fd=*/ -1,
                                oat_location,
                                oat_location,
-                               /* requested_base */ nullptr,
-                               /* executable */ false,
-                               /* low_4gb */ false,
-                               /* abs_dex_location */ nullptr,
-                               /* reservation */ nullptr,
+                               /*executable=*/ false,
+                               /*low_4gb=*/ false,
+                               /*abs_dex_location=*/ nullptr,
+                               /*reservation=*/ nullptr,
                                &error_msg);
     }
     if (oat_file == nullptr) {
@@ -1896,7 +1933,7 @@
       stats_.file_bytes = file->GetLength();
       // If the image is compressed, adjust to decompressed size.
       size_t uncompressed_size = image_header_.GetImageSize() - sizeof(ImageHeader);
-      if (image_header_.GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
+      if (image_header_.HasCompressedBlock()) {
         DCHECK_EQ(uncompressed_size, data_size) << "Sizes should match for uncompressed image";
       }
       stats_.file_bytes += uncompressed_size - data_size;
@@ -1908,8 +1945,9 @@
     const auto& dex_cache_arrays_section = image_header_.GetDexCacheArraysSection();
     const auto& intern_section = image_header_.GetInternedStringsSection();
     const auto& class_table_section = image_header_.GetClassTableSection();
+    const auto& sro_section = image_header_.GetImageStringReferenceOffsetsSection();
+    const auto& metadata_section = image_header_.GetMetadataSection();
     const auto& bitmap_section = image_header_.GetImageBitmapSection();
-    const auto& relocations_section = image_header_.GetImageRelocationsSection();
 
     stats_.header_bytes = header_bytes;
 
@@ -1949,16 +1987,15 @@
     CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
     stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
 
-    // There should be no space between the bitmap and relocations.
-    CHECK_EQ(bitmap_section.Offset() + bitmap_section.Size(), relocations_section.Offset());
-
     stats_.bitmap_bytes += bitmap_section.Size();
-    stats_.relocations_bytes += relocations_section.Size();
     stats_.art_field_bytes += field_section.Size();
     stats_.art_method_bytes += method_section.Size();
     stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
     stats_.interned_strings_bytes += intern_section.Size();
     stats_.class_table_bytes += class_table_section.Size();
+    stats_.sro_offset_bytes += sro_section.Size();
+    stats_.metadata_bytes += metadata_section.Size();
+
     stats_.Dump(os, indent_os);
     os << "\n";
 
@@ -2056,9 +2093,9 @@
     }
   }
 
-  static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
+  static void DumpFields(std::ostream& os, mirror::Object* obj, ObjPtr<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    mirror::Class* super = klass->GetSuperClass();
+    ObjPtr<mirror::Class> super = klass->GetSuperClass();
     if (super != nullptr) {
       DumpFields(os, obj, super);
     }
@@ -2375,64 +2412,45 @@
 
  public:
   struct Stats {
-    size_t oat_file_bytes;
-    size_t file_bytes;
+    size_t oat_file_bytes = 0u;
+    size_t file_bytes = 0u;
 
-    size_t header_bytes;
-    size_t object_bytes;
-    size_t art_field_bytes;
-    size_t art_method_bytes;
-    size_t dex_cache_arrays_bytes;
-    size_t interned_strings_bytes;
-    size_t class_table_bytes;
-    size_t bitmap_bytes;
-    size_t relocations_bytes;
-    size_t alignment_bytes;
+    size_t header_bytes = 0u;
+    size_t object_bytes = 0u;
+    size_t art_field_bytes = 0u;
+    size_t art_method_bytes = 0u;
+    size_t dex_cache_arrays_bytes = 0u;
+    size_t interned_strings_bytes = 0u;
+    size_t class_table_bytes = 0u;
+    size_t sro_offset_bytes = 0u;
+    size_t metadata_bytes = 0u;
+    size_t bitmap_bytes = 0u;
+    size_t alignment_bytes = 0u;
 
-    size_t managed_code_bytes;
-    size_t managed_code_bytes_ignoring_deduplication;
-    size_t native_to_managed_code_bytes;
-    size_t class_initializer_code_bytes;
-    size_t large_initializer_code_bytes;
-    size_t large_method_code_bytes;
+    size_t managed_code_bytes = 0u;
+    size_t managed_code_bytes_ignoring_deduplication = 0u;
+    size_t native_to_managed_code_bytes = 0u;
+    size_t class_initializer_code_bytes = 0u;
+    size_t large_initializer_code_bytes = 0u;
+    size_t large_method_code_bytes = 0u;
 
-    size_t vmap_table_bytes;
+    size_t vmap_table_bytes = 0u;
 
-    size_t dex_instruction_bytes;
+    size_t dex_instruction_bytes = 0u;
 
     std::vector<ArtMethod*> method_outlier;
     std::vector<size_t> method_outlier_size;
     std::vector<double> method_outlier_expansion;
     std::vector<std::pair<std::string, size_t>> oat_dex_file_sizes;
 
-    Stats()
-        : oat_file_bytes(0),
-          file_bytes(0),
-          header_bytes(0),
-          object_bytes(0),
-          art_field_bytes(0),
-          art_method_bytes(0),
-          dex_cache_arrays_bytes(0),
-          interned_strings_bytes(0),
-          class_table_bytes(0),
-          bitmap_bytes(0),
-          relocations_bytes(0),
-          alignment_bytes(0),
-          managed_code_bytes(0),
-          managed_code_bytes_ignoring_deduplication(0),
-          native_to_managed_code_bytes(0),
-          class_initializer_code_bytes(0),
-          large_initializer_code_bytes(0),
-          large_method_code_bytes(0),
-          vmap_table_bytes(0),
-          dex_instruction_bytes(0) {}
+    Stats() {}
 
     struct SizeAndCount {
       SizeAndCount(size_t bytes_in, size_t count_in) : bytes(bytes_in), count(count_in) {}
       size_t bytes;
       size_t count;
     };
-    typedef SafeMap<std::string, SizeAndCount> SizeAndCountTable;
+    using SizeAndCountTable = SafeMap<std::string, SizeAndCount>;
     SizeAndCountTable sizes_and_counts;
 
     void Update(const char* descriptor, size_t object_bytes_in) {
@@ -2579,8 +2597,9 @@
                                   "dex_cache_arrays_bytes =  %8zd (%2.0f%% of art file bytes)\n"
                                   "interned_string_bytes  =  %8zd (%2.0f%% of art file bytes)\n"
                                   "class_table_bytes      =  %8zd (%2.0f%% of art file bytes)\n"
+                                  "sro_bytes              =  %8zd (%2.0f%% of art file bytes)\n"
+                                  "metadata_bytes         =  %8zd (%2.0f%% of art file bytes)\n"
                                   "bitmap_bytes           =  %8zd (%2.0f%% of art file bytes)\n"
-                                  "relocations_bytes      =  %8zd (%2.0f%% of art file bytes)\n"
                                   "alignment_bytes        =  %8zd (%2.0f%% of art file bytes)\n\n",
                                   header_bytes, PercentOfFileBytes(header_bytes),
                                   object_bytes, PercentOfFileBytes(object_bytes),
@@ -2591,14 +2610,15 @@
                                   interned_strings_bytes,
                                   PercentOfFileBytes(interned_strings_bytes),
                                   class_table_bytes, PercentOfFileBytes(class_table_bytes),
+                                  sro_offset_bytes, PercentOfFileBytes(sro_offset_bytes),
+                                  metadata_bytes, PercentOfFileBytes(metadata_bytes),
                                   bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
-                                  relocations_bytes, PercentOfFileBytes(relocations_bytes),
                                   alignment_bytes, PercentOfFileBytes(alignment_bytes))
             << std::flush;
         CHECK_EQ(file_bytes,
                  header_bytes + object_bytes + art_field_bytes + art_method_bytes +
                  dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
-                 bitmap_bytes + relocations_bytes + alignment_bytes);
+                 sro_offset_bytes + metadata_bytes + bitmap_bytes + alignment_bytes);
       }
 
       os << "object_bytes breakdown:\n";
@@ -2712,14 +2732,13 @@
     // We need to map the oat file in the low 4gb or else the fixup wont be able to fit oat file
     // pointers into 32 bit pointer sized ArtMethods.
     std::string error_msg;
-    std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                     options->app_oat_,
                                                     options->app_oat_,
-                                                    /* requested_base */ nullptr,
-                                                    /* executable */ false,
-                                                    /* low_4gb */ true,
-                                                    /* abs_dex_location */ nullptr,
-                                                    /* reservation */ nullptr,
+                                                    /*executable=*/ false,
+                                                    /*low_4gb=*/ true,
+                                                    /*abs_dex_location=*/ nullptr,
+                                                    /*reservation=*/ nullptr,
                                                     &error_msg));
     if (oat_file == nullptr) {
       LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2836,14 +2855,13 @@
                  << "oatdump might fail if the oat file does not contain the dex code.";
   }
   std::string error_msg;
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   oat_filename,
                                                   oat_filename,
-                                                  /* requested_base */ nullptr,
-                                                  /* executable */ false,
-                                                  /* low_4gb */ false,
+                                                  /*executable=*/ false,
+                                                  /*low_4gb=*/ false,
                                                   dex_filename,
-                                                  /* reservation */ nullptr,
+                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
     LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2862,14 +2880,13 @@
                         std::string& output_name,
                         bool no_bits) {
   std::string error_msg;
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   oat_filename,
                                                   oat_filename,
-                                                  /* requested_base */ nullptr,
-                                                  /* executable */ false,
-                                                  /* low_4gb */ false,
+                                                  /*executable=*/ false,
+                                                  /*low_4gb=*/ false,
                                                   dex_filename,
-                                                  /* reservation */ nullptr,
+                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
     LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2910,14 +2927,13 @@
 
     if (oat_filename != nullptr) {
       std::string error_msg;
-      std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+      std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                       oat_filename,
                                                       oat_filename,
-                                                      /* requested_base */ nullptr,
-                                                      /* executable */ false,
-                                                      /*low_4gb*/false,
+                                                      /*executable=*/ false,
+                                                      /*low_4gb=*/false,
                                                       dex_filename,
-                                                      /* reservation */ nullptr,
+                                                      /*reservation=*/ nullptr,
                                                       &error_msg));
       if (oat_file == nullptr) {
         LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2991,7 +3007,7 @@
       for (uint32_t class_def_index = 0;
            class_def_index != dex_file->NumClassDefs();
            ++class_def_index) {
-        const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+        const dex::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
         const char* descriptor = dex_file->GetClassDescriptor(class_def);
         h_klass.Assign(class_linker->FindClass(self, descriptor, h_class_loader));
         if (h_klass == nullptr) {
@@ -3380,7 +3396,7 @@
     } else if (option.starts_with("--export-dex-to=")) {
       export_dex_location_ = option.substr(strlen("--export-dex-to=")).data();
     } else if (option.starts_with("--addr2instr=")) {
-      if (!ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
+      if (!android::base::ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
         *error_msg = "Address conversion failed";
         return kParseError;
       }
@@ -3423,7 +3439,7 @@
     return kParseOk;
   }
 
-  virtual std::string GetUsage() const {
+  std::string GetUsage() const override {
     std::string usage;
 
     usage +=
@@ -3577,7 +3593,7 @@
     }
   }
 
-  virtual bool ExecuteWithRuntime(Runtime* runtime) {
+  bool ExecuteWithRuntime(Runtime* runtime) override {
     CHECK(args_ != nullptr);
 
     if (!args_->imt_dump_.empty() || args_->imt_stat_dump_) {
diff --git a/oatdump/oatdump_image_test.cc b/oatdump/oatdump_image_test.cc
index de48b04..0a076f0 100644
--- a/oatdump/oatdump_image_test.cc
+++ b/oatdump/oatdump_image_test.cc
@@ -40,13 +40,13 @@
 TEST_F(OatDumpTest, TestOatImage) {
   TEST_DISABLED_FOR_ARM_AND_MIPS();
   std::string error_msg;
-  ASSERT_TRUE(Exec(kDynamic, kModeOat, {}, kListAndCode));
+  ASSERT_TRUE(Exec(kDynamic, kModeCoreOat, {}, kListAndCode));
 }
 TEST_F(OatDumpTest, TestOatImageStatic) {
   TEST_DISABLED_FOR_ARM_AND_MIPS();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
-  ASSERT_TRUE(Exec(kStatic, kModeOat, {}, kListAndCode));
+  ASSERT_TRUE(Exec(kStatic, kModeCoreOat, {}, kListAndCode));
 }
 
 }  // namespace art
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index bcba182..8505b0c 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <android-base/file.h>
+
 #include "oatdump_test.h"
 
 namespace art {
@@ -90,11 +92,14 @@
   // Test is failing on target, b/77469384.
   TEST_DISABLED_FOR_TARGET();
   std::string error_msg;
+  ASSERT_TRUE(GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M"}));
   ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly));
-  const std::string dex_location = tmp_dir_+ "/core-oj-hostdex.jar_export.dex";
+  const std::string dex_location =
+      tmp_dir_+ "/" + android::base::Basename(GetTestDexFileName(GetAppBaseName().c_str())) +
+      "_export.dex";
   const std::string dexdump2 = GetExecutableFilePath("dexdump2",
-                                                     /*is_debug*/false,
-                                                     /*is_static*/false);
+                                                     /*is_debug=*/false,
+                                                     /*is_static=*/false);
   std::string output;
   auto post_fork_fn = []() { return true; };
   ForkAndExecResult res = ForkAndExec({dexdump2, "-d", dex_location}, post_fork_fn, &output);
@@ -104,6 +109,7 @@
   TEST_DISABLED_FOR_ARM_AND_MIPS();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
+  ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M"}));
   ASSERT_TRUE(Exec(kStatic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly));
 }
 
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 4ee5101..3ead8de 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -90,6 +90,7 @@
 
   enum Mode {
     kModeOat,
+    kModeCoreOat,
     kModeOatWithBootImage,
     kModeArt,
     kModeSymbolize,
@@ -122,6 +123,10 @@
         "-Xmx512m",
         "--runtime-arg",
         "-Xnorelocate",
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()),
         "--boot-image=" + GetCoreArtLocation(),
         "--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)),
         "--dex-file=" + GetTestDexFileName(GetAppBaseName().c_str()),
@@ -174,6 +179,11 @@
         expected_prefixes.push_back("InlineInfo");
       }
       if (mode == kModeArt) {
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(
+            GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
         exec_argv.push_back("--image=" + core_art_location_);
         exec_argv.push_back("--instruction-set=" + std::string(
             GetInstructionSetString(kRuntimeISA)));
@@ -181,13 +191,20 @@
         expected_prefixes.push_back("IMAGE BEGIN:");
         expected_prefixes.push_back("kDexCaches:");
       } else if (mode == kModeOatWithBootImage) {
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(
+            GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
         exec_argv.push_back("--boot-image=" + GetCoreArtLocation());
         exec_argv.push_back("--instruction-set=" + std::string(
             GetInstructionSetString(kRuntimeISA)));
         exec_argv.push_back("--oat-file=" + GetAppOdexName());
+      } else if (mode == kModeCoreOat) {
+        exec_argv.push_back("--oat-file=" + core_oat_location_);
       } else {
         CHECK_EQ(static_cast<size_t>(mode), static_cast<size_t>(kModeOat));
-        exec_argv.push_back("--oat-file=" + core_oat_location_);
+        exec_argv.push_back("--oat-file=" + GetAppOdexName());
       }
     }
     exec_argv.insert(exec_argv.end(), args.begin(), args.end());
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index 8d0200c..8297c54 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -77,6 +77,7 @@
                    << fname << "')";
     }
 
+    flags |= O_CLOEXEC;
     int fd = TEMP_FAILURE_RETRY(open(fname, flags & ~JVM_O_DELETE, mode));
     if (fd < 0) {
         int err = errno;
@@ -317,7 +318,8 @@
 
 JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env,
                                  jstring javaFilename,
-                                 jobject javaLoader) {
+                                 jobject javaLoader,
+                                 jclass caller) {
   ScopedUtfChars filename(env, javaFilename);
   if (filename.c_str() == nullptr) {
     return nullptr;
@@ -329,6 +331,7 @@
     bool success = vm->LoadNativeLibrary(env,
                                          filename.c_str(),
                                          javaLoader,
+                                         caller,
                                          &error_msg);
     if (success) {
       return nullptr;
@@ -434,36 +437,37 @@
   }
 }
 
-JNIEXPORT jint JVM_IHashCode(JNIEnv* env ATTRIBUTE_UNUSED,
+JNIEXPORT __attribute__((noreturn)) jint JVM_IHashCode(JNIEnv* env ATTRIBUTE_UNUSED,
                              jobject javaObject ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL) << "JVM_IHashCode is not implemented";
-  return 0;
+  UNREACHABLE();
 }
 
-JNIEXPORT jlong JVM_NanoTime(JNIEnv* env ATTRIBUTE_UNUSED, jclass unused ATTRIBUTE_UNUSED) {
+JNIEXPORT __attribute__((noreturn)) jlong JVM_NanoTime(JNIEnv* env ATTRIBUTE_UNUSED, jclass unused ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL) << "JVM_NanoTime is not implemented";
-  return 0L;
+  UNREACHABLE();
 }
 
-JNIEXPORT void JVM_ArrayCopy(JNIEnv* /* env */, jclass /* unused */, jobject /* javaSrc */,
+JNIEXPORT __attribute__((noreturn)) void JVM_ArrayCopy(JNIEnv* /* env */, jclass /* unused */, jobject /* javaSrc */,
                              jint /* srcPos */, jobject /* javaDst */, jint /* dstPos */,
                              jint /* length */) {
   UNIMPLEMENTED(FATAL) << "JVM_ArrayCopy is not implemented";
+  UNREACHABLE();
 }
 
-JNIEXPORT jint JVM_FindSignal(const char* name ATTRIBUTE_UNUSED) {
+JNIEXPORT __attribute__((noreturn)) jint JVM_FindSignal(const char* name ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "JVM_FindSignal is not implemented";
-  return 0;
+  UNREACHABLE();
 }
 
-JNIEXPORT void* JVM_RegisterSignal(jint signum ATTRIBUTE_UNUSED, void* handler ATTRIBUTE_UNUSED) {
+JNIEXPORT __attribute__((noreturn)) void* JVM_RegisterSignal(jint signum ATTRIBUTE_UNUSED, void* handler ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "JVM_RegisterSignal is not implemented";
-  return nullptr;
+  UNREACHABLE();
 }
 
-JNIEXPORT jboolean JVM_RaiseSignal(jint signum ATTRIBUTE_UNUSED) {
+JNIEXPORT __attribute__((noreturn)) jboolean JVM_RaiseSignal(jint signum ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "JVM_RaiseSignal is not implemented";
-  return JNI_FALSE;
+  UNREACHABLE();
 }
 
 JNIEXPORT __attribute__((noreturn))  void JVM_Halt(jint code) {
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index d8902d6..7621d48 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -41,6 +41,7 @@
         "ti_field.cc",
         "ti_heap.cc",
         "ti_jni.cc",
+        "ti_logging.cc",
         "ti_method.cc",
         "ti_monitor.cc",
         "ti_object.cc",
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 3213bbe..7a2b638 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -29,6 +29,7 @@
  * questions.
  */
 
+#include <memory>
 #include <string>
 #include <type_traits>
 #include <vector>
@@ -58,6 +59,7 @@
 #include "ti_field.h"
 #include "ti_heap.h"
 #include "ti_jni.h"
+#include "ti_logging.h"
 #include "ti_method.h"
 #include "ti_monitor.h"
 #include "ti_object.h"
@@ -313,10 +315,10 @@
     return StackUtil::GetFrameCount(env, thread, count_ptr);
   }
 
-  static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+  static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_pop_frame);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::PopFrame(env, thread);
   }
 
   static jvmtiError GetFrameLocation(jvmtiEnv* env,
@@ -787,7 +789,7 @@
                                                      classes,
                                                      &error_msg);
     if (res != OK) {
-      LOG(WARNING) << "FAILURE TO RETRANFORM " << error_msg;
+      JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANFORM " << error_msg;
     }
     return res;
   }
@@ -806,7 +808,7 @@
                                                 class_definitions,
                                                 &error_msg);
     if (res != OK) {
-      LOG(WARNING) << "FAILURE TO REDEFINE " << error_msg;
+      JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
     }
     return res;
   }
@@ -1195,7 +1197,7 @@
 #undef ADD_CAPABILITY
     gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
                                              changed,
-                                             /*added*/true);
+                                             /*added=*/true);
     return ret;
   }
 
@@ -1219,7 +1221,7 @@
 #undef DEL_CAPABILITY
     gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
                                              changed,
-                                             /*added*/false);
+                                             /*added=*/false);
     return OK;
   }
 
@@ -1489,8 +1491,9 @@
       local_data(nullptr),
       ti_version(version),
       capabilities(),
-      event_info_mutex_("jvmtiEnv_EventInfoMutex") {
-  object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
+      event_info_mutex_("jvmtiEnv_EventInfoMutex"),
+      last_error_mutex_("jvmtiEnv_LastErrorMutex", art::LockLevel::kGenericBottomLock) {
+  object_tag_table = std::make_unique<ObjectTagTable>(event_handler, this);
   functions = &gJvmtiInterface;
 }
 
@@ -1549,7 +1552,7 @@
 
   {
     // Make sure we can deopt anything we need to.
-    art::ScopedObjectAccess soa(art::Thread::Current());
+    art::ScopedSuspendAll ssa(__FUNCTION__);
     gDeoptManager->FinishSetup();
   }
 
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 82f3866..7433e54 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -102,6 +102,10 @@
   // RW lock to protect access to all of the event data.
   art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  std::string last_error_ GUARDED_BY(last_error_mutex_);
+  // Lock to touch the last-error-message.
+  art::Mutex last_error_mutex_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+
   ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint ti_version);
 
   static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
@@ -249,7 +253,7 @@
     .can_get_owned_monitor_info                      = 1,
     .can_get_current_contended_monitor               = 1,
     .can_get_monitor_info                            = 1,
-    .can_pop_frame                                   = 0,
+    .can_pop_frame                                   = 1,
     .can_redefine_classes                            = 1,
     .can_signal_thread                               = 1,
     .can_get_source_file_name                        = 1,
@@ -291,6 +295,7 @@
 //   can_retransform_classes:
 //   can_redefine_any_class:
 //   can_redefine_classes:
+//   can_pop_frame:
 //     We need to ensure that inlined code is either not present or can always be deoptimized. This
 //     is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
 //     on a threads stack.
@@ -303,7 +308,7 @@
     .can_get_owned_monitor_info                      = 0,
     .can_get_current_contended_monitor               = 0,
     .can_get_monitor_info                            = 0,
-    .can_pop_frame                                   = 0,
+    .can_pop_frame                                   = 1,
     .can_redefine_classes                            = 1,
     .can_signal_thread                               = 0,
     .can_get_source_file_name                        = 0,
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index d20c756..d456d83 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -139,6 +139,9 @@
         // OnLoad since the runtime hasn't started up sufficiently. This is only expected to happen
         // on userdebug/eng builds.
         LOG(INFO) << "Attempting to start jit for openjdkjvmti plugin.";
+        // Note: use rwx allowed = true, because if this is the system server, we will not be
+        //       allowed to allocate any JIT code cache, anyway.
+        runtime->CreateJitCodeCache(/*rwx_memory_allowed=*/true);
         runtime->CreateJit();
         if (runtime->GetJit() == nullptr) {
           LOG(WARNING) << "Could not start jit for openjdkjvmti plugin. This process might be "
@@ -289,7 +292,7 @@
     uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
                                                      art::gc::kCollectorTypeCriticalSection);
     art::Runtime::Current()->GetThreadList()->SuspendAll("JMVTI Deoptimizing methods",
-                                                         /*long_suspend*/ false);
+                                                         /*long_suspend=*/ false);
   }
 
   ~ScopedDeoptimizationContext()
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index d9f34a5..856f3f4 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -35,12 +35,8 @@
 #include <atomic>
 #include <unordered_map>
 
-#include "jni.h"
-#include "jvmti.h"
-
 #include "base/mutex.h"
 #include "runtime_callbacks.h"
-#include "ti_breakpoint.h"
 
 namespace art {
 class ArtMethod;
@@ -107,7 +103,7 @@
 
   void FinishSetup()
       REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
-      REQUIRES_SHARED(art::Locks::mutator_lock_);
+      REQUIRES(art::Locks::mutator_lock_);
 
   static DeoptManager* Get();
 
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index e98517f..8e06fe3 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -25,8 +25,11 @@
 #include "events.h"
 #include "jni/jni_internal.h"
 #include "nativehelper/scoped_local_ref.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
+#include "stack.h"
 #include "ti_breakpoint.h"
+#include "ti_thread.h"
 
 #include "art_jvmti.h"
 
@@ -359,6 +362,7 @@
   // have to deal with use-after-free or the frames being reallocated later.
   art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
   return env->notify_frames.erase(frame) != 0 &&
+      !frame->GetForcePopFrame() &&
       ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
 }
 
@@ -418,6 +422,67 @@
   ExecuteCallback<ArtJvmtiEvent::kFramePop>(event, jnienv, jni_thread, jmethod, is_exception);
 }
 
+struct ScopedDisablePopFrame {
+ public:
+  explicit ScopedDisablePopFrame(art::Thread* thread) : thread_(thread) {
+    art::Locks::mutator_lock_->AssertSharedHeld(thread_);
+    art::MutexLock mu(thread_, *art::Locks::thread_list_lock_);
+    JvmtiGlobalTLSData* data = ThreadUtil::GetOrCreateGlobalTLSData(thread_);
+    current_top_frame_ = art::StackVisitor::ComputeNumFrames(
+        thread_, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+    old_disable_frame_pop_depth_ = data->disable_pop_frame_depth;
+    data->disable_pop_frame_depth = current_top_frame_;
+    DCHECK(old_disable_frame_pop_depth_ == JvmtiGlobalTLSData::kNoDisallowedPopFrame ||
+           current_top_frame_ > old_disable_frame_pop_depth_)
+        << "old: " << old_disable_frame_pop_depth_ << " current: " << current_top_frame_;
+  }
+
+  ~ScopedDisablePopFrame() {
+    art::Locks::mutator_lock_->AssertSharedHeld(thread_);
+    art::MutexLock mu(thread_, *art::Locks::thread_list_lock_);
+    JvmtiGlobalTLSData* data = ThreadUtil::GetGlobalTLSData(thread_);
+    DCHECK_EQ(data->disable_pop_frame_depth, current_top_frame_);
+    data->disable_pop_frame_depth = old_disable_frame_pop_depth_;
+  }
+
+ private:
+  art::Thread* thread_;
+  size_t current_top_frame_;
+  size_t old_disable_frame_pop_depth_;
+};
+// We want to prevent the use of PopFrame when reporting either of these events.
+template <ArtJvmtiEvent kEvent>
+inline void EventHandler::DispatchClassLoadOrPrepareEvent(art::Thread* thread,
+                                                          JNIEnv* jnienv,
+                                                          jthread jni_thread,
+                                                          jclass klass) const {
+  ScopedDisablePopFrame sdpf(thread);
+  art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+  std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread,
+                                                                             jnienv,
+                                                                             jni_thread,
+                                                                             klass);
+
+  for (auto event : events) {
+    ExecuteCallback<kEvent>(event, jnienv, jni_thread, klass);
+  }
+}
+
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassLoad>(art::Thread* thread,
+                                                                   JNIEnv* jnienv,
+                                                                   jthread jni_thread,
+                                                                   jclass klass) const {
+  DispatchClassLoadOrPrepareEvent<ArtJvmtiEvent::kClassLoad>(thread, jnienv, jni_thread, klass);
+}
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassPrepare>(art::Thread* thread,
+                                                                      JNIEnv* jnienv,
+                                                                      jthread jni_thread,
+                                                                      jclass klass) const {
+  DispatchClassLoadOrPrepareEvent<ArtJvmtiEvent::kClassPrepare>(thread, jnienv, jni_thread, klass);
+}
+
 // Need to give a custom specialization for NativeMethodBind since it has to deal with an out
 // variable.
 template <>
@@ -553,6 +618,7 @@
                               : ArtJvmtiEvent::kClassFileLoadHookRetransformable;
   return (added && caps.can_access_local_variables == 1) ||
       caps.can_generate_breakpoint_events == 1 ||
+      caps.can_pop_frame == 1 ||
       (caps.can_retransform_classes == 1 &&
        IsEventEnabledAnywhere(event) &&
        env->event_masks.IsEnabledAnywhere(event));
@@ -573,6 +639,11 @@
     if (caps.can_generate_breakpoint_events == 1) {
       HandleBreakpointEventsChanged(added);
     }
+    if (caps.can_pop_frame == 1 && added) {
+      // TODO We should keep track of how many of these have been enabled and remove it if there are
+      // no more possible users. This isn't expected to be too common.
+      art::Runtime::Current()->SetNonStandardExitsEnabled();
+    }
   }
 }
 
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 43d0b10..22c622a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -32,7 +32,9 @@
 #include "events-inl.h"
 
 #include <array>
+#include <sys/time.h>
 
+#include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_jvmti.h"
 #include "art_method-inl.h"
@@ -56,6 +58,7 @@
 #include "thread-inl.h"
 #include "thread_list.h"
 #include "ti_phase.h"
+#include "well_known_classes.h"
 
 namespace openjdkjvmti {
 
@@ -410,14 +413,103 @@
   EventHandler* handler_;
 };
 
-static void SetupMonitorListener(art::MonitorCallback* listener, bool enable) {
+class JvmtiParkListener : public art::ParkCallback {
+ public:
+  explicit JvmtiParkListener(EventHandler* handler) : handler_(handler) {}
+
+  void ThreadParkStart(bool is_absolute, int64_t timeout)
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
+      art::Thread* self = art::Thread::Current();
+      art::JNIEnvExt* jnienv = self->GetJniEnv();
+      art::ArtField* parkBlockerField = art::jni::DecodeArtField(
+          art::WellKnownClasses::java_lang_Thread_parkBlocker);
+      art::ObjPtr<art::mirror::Object> blocker_obj = parkBlockerField->GetObj(self->GetPeer());
+      if (blocker_obj.IsNull()) {
+        blocker_obj = self->GetPeer();
+      }
+      int64_t timeout_ms;
+      if (!is_absolute) {
+        if (timeout == 0) {
+          timeout_ms = 0;
+        } else {
+          timeout_ms = timeout / 1000000;
+          if (timeout_ms == 0) {
+            // If we were instructed to park for a nonzero number of nanoseconds, but not enough
+            // to be a full millisecond, round up to 1 ms. A nonzero park() call will return
+            // soon, but a 0 wait or park call will wait indefinitely.
+            timeout_ms = 1;
+          }
+        }
+      } else {
+        struct timeval tv;
+        gettimeofday(&tv, (struct timezone *) nullptr);
+        int64_t now = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
+        if (now < timeout) {
+          timeout_ms = timeout - now;
+        } else {
+          // Waiting for 0 ms is an indefinite wait; parking until a time in
+          // the past or the current time will return immediately, so emulate
+          // the shortest possible wait event.
+          timeout_ms = 1;
+        }
+      }
+      ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
+      RunEventCallback<ArtJvmtiEvent::kMonitorWait>(
+          handler_,
+          self,
+          jnienv,
+          blocker.get(),
+          static_cast<jlong>(timeout_ms));
+    }
+  }
+
+
+  // Our interpretation of the spec is that the JVMTI_EVENT_MONITOR_WAITED will be sent immediately
+  // after a thread has woken up from a sleep caused by a call to Object#wait. If the thread will
+  // never go to sleep (due to not having the lock, having bad arguments, or having an exception
+  // propogated from JVMTI_EVENT_MONITOR_WAIT) we will not send this event.
+  //
+  // This does not fully match the RI semantics. Specifically, we will not send the
+  // JVMTI_EVENT_MONITOR_WAITED event in one situation where the RI would, there was an exception in
+  // the JVMTI_EVENT_MONITOR_WAIT event but otherwise the call was fine. In that case the RI would
+  // send this event and return without going to sleep.
+  //
+  // See b/65558434 for more discussion.
+  void ThreadParkFinished(bool timeout) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
+      art::Thread* self = art::Thread::Current();
+      art::JNIEnvExt* jnienv = self->GetJniEnv();
+      art::ArtField* parkBlockerField = art::jni::DecodeArtField(
+          art::WellKnownClasses::java_lang_Thread_parkBlocker);
+      art::ObjPtr<art::mirror::Object> blocker_obj = parkBlockerField->GetObj(self->GetPeer());
+      if (blocker_obj.IsNull()) {
+        blocker_obj = self->GetPeer();
+      }
+      ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
+      RunEventCallback<ArtJvmtiEvent::kMonitorWaited>(
+          handler_,
+          self,
+          jnienv,
+          blocker.get(),
+          static_cast<jboolean>(timeout));
+    }
+  }
+
+ private:
+  EventHandler* handler_;
+};
+
+static void SetupMonitorListener(art::MonitorCallback* monitor_listener, art::ParkCallback* park_listener, bool enable) {
   // We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
   // now, do a workaround: (possibly) acquire and release.
   art::ScopedObjectAccess soa(art::Thread::Current());
   if (enable) {
-    art::Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(listener);
+    art::Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(monitor_listener);
+    art::Runtime::Current()->GetRuntimeCallbacks()->AddParkCallback(park_listener);
   } else {
-    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(listener);
+    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(monitor_listener);
+    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveParkCallback(park_listener);
   }
 }
 
@@ -517,7 +609,7 @@
           self,
           jnienv,
           art::jni::EncodeArtMethod(method),
-          /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
           val);
     }
   }
@@ -545,7 +637,7 @@
           self,
           jnienv,
           art::jni::EncodeArtMethod(method),
-          /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
           val);
     }
   }
@@ -572,7 +664,7 @@
           self,
           jnienv,
           art::jni::EncodeArtMethod(method),
-          /*was_popped_by_exception*/ static_cast<jboolean>(JNI_TRUE),
+          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_TRUE),
           val);
       // Match RI behavior of just throwing away original exception if a new one is thrown.
       if (LIKELY(!self->IsExceptionPending())) {
@@ -777,7 +869,7 @@
                             context.get(),
                             /*out*/ out_method,
                             /*out*/ dex_pc);
-    clf.WalkStack(/* include_transitions */ false);
+    clf.WalkStack(/* include_transitions= */ false);
   }
 
   // Call-back when an exception is thrown.
@@ -793,8 +885,8 @@
       FindCatchMethodsFromThrow(self, exception_object, &catch_method, &catch_pc);
       uint32_t dex_pc = 0;
       art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
-                                                      /* check_suspended */ true,
-                                                      /* abort_on_error */ art::kIsDebugBuild);
+                                                      /* check_suspended= */ true,
+                                                      /* abort_on_error= */ art::kIsDebugBuild);
       ScopedLocalRef<jobject> exception(jnienv,
                                         AddLocalRef<jobject>(jnienv, exception_object.Get()));
       RunEventCallback<ArtJvmtiEvent::kException>(
@@ -819,8 +911,8 @@
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       uint32_t dex_pc;
       art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
-                                                      /* check_suspended */ true,
-                                                      /* abort_on_error */ art::kIsDebugBuild);
+                                                      /* check_suspended= */ true,
+                                                      /* abort_on_error= */ art::kIsDebugBuild);
       ScopedLocalRef<jobject> exception(jnienv,
                                         AddLocalRef<jobject>(jnienv, exception_object.Get()));
       RunEventCallback<ArtJvmtiEvent::kExceptionCatch>(
@@ -843,16 +935,6 @@
     return;
   }
 
-  // Call-back for when we get an invokevirtual or an invokeinterface.
-  void InvokeVirtualOrInterface(art::Thread* self ATTRIBUTE_UNUSED,
-                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
-    return;
-  }
-
  private:
   EventHandler* const event_handler_;
 };
@@ -879,7 +961,7 @@
       return art::instrumentation::Instrumentation::kExceptionHandled;
     default:
       LOG(FATAL) << "Unknown event ";
-      return 0;
+      UNREACHABLE();
   }
 }
 
@@ -1063,7 +1145,7 @@
     case ArtJvmtiEvent::kMonitorWait:
     case ArtJvmtiEvent::kMonitorWaited:
       if (!OtherMonitorEventsEnabledAnywhere(event)) {
-        SetupMonitorListener(monitor_listener_.get(), enable);
+        SetupMonitorListener(monitor_listener_.get(), park_listener_.get(), enable);
       }
       return;
     default:
@@ -1214,6 +1296,7 @@
   gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
   method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
   monitor_listener_.reset(new JvmtiMonitorListener(this));
+  park_listener_.reset(new JvmtiParkListener(this));
 }
 
 EventHandler::~EventHandler() {
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index bf12cb1..abb15cc 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -35,6 +35,7 @@
 class JvmtiGcPauseListener;
 class JvmtiMethodTraceListener;
 class JvmtiMonitorListener;
+class JvmtiParkListener;
 
 // an enum for ArtEvents. This differs from the JVMTI events only in that we distinguish between
 // retransformation capable and incapable loading
@@ -301,6 +302,13 @@
                                                            unsigned char** new_class_data) const
       REQUIRES(!envs_lock_);
 
+  template <ArtJvmtiEvent kEvent>
+  ALWAYS_INLINE inline void DispatchClassLoadOrPrepareEvent(art::Thread* thread,
+                                                            JNIEnv* jnienv,
+                                                            jthread jni_thread,
+                                                            jclass klass) const
+      REQUIRES(!envs_lock_);
+
   void HandleEventType(ArtJvmtiEvent event, bool enable);
   void HandleLocalAccessCapabilityAdded();
   void HandleBreakpointEventsChanged(bool enable);
@@ -324,6 +332,7 @@
   std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
   std::unique_ptr<JvmtiMethodTraceListener> method_trace_listener_;
   std::unique_ptr<JvmtiMonitorListener> monitor_listener_;
+  std::unique_ptr<JvmtiParkListener> park_listener_;
 
   // True if frame pop has ever been enabled. Since we store pointers to stack frames we need to
   // continue to listen to this event even if it has been disabled.
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index 2ca87fd..da7eef9 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -51,17 +51,6 @@
       dex_file->CalculateChecksum();
 }
 
-static void UnhideApis(const art::DexFile& target_dex_file) {
-  for (art::ClassAccessor accessor : target_dex_file.GetClasses()) {
-    for (const art::ClassAccessor::Field& field : accessor.GetFields()) {
-      field.UnHideAccessFlags();
-    }
-    for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
-      method.UnHideAccessFlags();
-    }
-  }
-}
-
 static const art::VdexFile* GetVdex(const art::DexFile& original_dex_file) {
   const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
   if (oat_dex == nullptr) {
@@ -78,9 +67,10 @@
                            const art::DexFile& original_dex_file) {
   const art::VdexFile* vdex = GetVdex(original_dex_file);
   if (vdex != nullptr) {
-    vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
+    vdex->UnquickenDexFile(new_dex_file,
+                           original_dex_file,
+                           /* decompile_return_instruction= */ true);
   }
-  UnhideApis(new_dex_file);
 }
 
 static void DCheckVerifyDexFile(const art::DexFile& dex) {
@@ -90,7 +80,7 @@
                                       dex.Begin(),
                                       dex.Size(),
                                       "FixedUpDexFile_Verification.dex",
-                                      /*verify_checksum*/ true,
+                                      /*verify_checksum=*/ true,
                                       &error)) {
       LOG(FATAL) << "Failed to verify de-quickened dex file: " << error;
     }
@@ -110,23 +100,22 @@
   // property from `original` to `new_dex_file`.
   const art::DexFileLoader dex_file_loader;
 
-  if (original.IsCompactDexFile()) {
+  if (original.IsCompactDexFile() || original.HasHiddenapiClassData()) {
     // Since we are supposed to return a standard dex, convert back using dexlayout. It's OK to do
     // this before unquickening.
+    // We also do dex layout for dex files that have hidden API data, as we want to remove that
+    // data.
     art::Options options;
     options.compact_dex_level_ = art::CompactDexLevel::kCompactDexLevelNone;
-    // Never verify the output since hidden API flags may cause the dex file verifier to fail.
-    // See b/74063493
-    options.verify_output_ = false;
     // Add a filter to only include the class that has the matching descriptor.
     static constexpr bool kFilterByDescriptor = true;
     if (kFilterByDescriptor) {
       options.class_filter_.insert(descriptor);
     }
     art::DexLayout dex_layout(options,
-                              /*info*/ nullptr,
-                              /*out_file*/ nullptr,
-                              /*header*/ nullptr);
+                              /*info=*/ nullptr,
+                              /*out_file=*/ nullptr,
+                              /*header=*/ nullptr);
     std::unique_ptr<art::DexContainer> dex_container;
     bool result = dex_layout.ProcessDexFile(
         original.GetLocation().c_str(),
@@ -147,21 +136,19 @@
   new_dex_file = dex_file_loader.Open(
       data.data(),
       data.size(),
-      /*location*/"Unquickening_dexfile.dex",
-      /*location_checksum*/0,
-      /*oat_dex_file*/nullptr,
-      /*verify*/false,
-      /*verify_checksum*/false,
+      /*location=*/"Unquickening_dexfile.dex",
+      /*location_checksum=*/0,
+      /*oat_dex_file=*/nullptr,
+      /*verify=*/false,
+      /*verify_checksum=*/false,
       &error);
 
-  if (new_dex_file  == nullptr) {
+  if (new_dex_file == nullptr) {
     LOG(ERROR) << "Unable to open dex file from memory for unquickening! error: " << error;
     return nullptr;
   }
 
-  if (original.IsPlatformDexFile()) {
-    const_cast<art::DexFile*>(new_dex_file.get())->SetIsPlatformDexFile();
-  }
+  const_cast<art::DexFile*>(new_dex_file.get())->SetHiddenapiDomain(original.GetHiddenapiDomain());
 
   DoDexUnquicken(*new_dex_file, original);
 
diff --git a/openjdkjvmti/object_tagging.cc b/openjdkjvmti/object_tagging.cc
index ba242ef..0a51bf2 100644
--- a/openjdkjvmti/object_tagging.cc
+++ b/openjdkjvmti/object_tagging.cc
@@ -36,12 +36,41 @@
 #include "art_jvmti.h"
 #include "events-inl.h"
 #include "jvmti_weak_table-inl.h"
+#include "mirror/object-inl.h"
 
 namespace openjdkjvmti {
 
 // Instantiate for jlong = JVMTI tags.
 template class JvmtiWeakTable<jlong>;
 
+void ObjectTagTable::Allow() {
+  JvmtiWeakTable<jlong>::Allow();
+  SendDelayedFreeEvents();
+}
+
+void ObjectTagTable::Broadcast(bool broadcast_for_checkpoint) {
+  JvmtiWeakTable<jlong>::Broadcast(broadcast_for_checkpoint);
+  if (!broadcast_for_checkpoint) {
+    SendDelayedFreeEvents();
+  }
+}
+
+void ObjectTagTable::SendDelayedFreeEvents() {
+  std::vector<jlong> to_send;
+  {
+    art::MutexLock mu(art::Thread::Current(), lock_);
+    to_send.swap(null_tags_);
+  }
+  for (jlong t : to_send) {
+    SendSingleFreeEvent(t);
+  }
+}
+
+void ObjectTagTable::SendSingleFreeEvent(jlong tag) {
+  event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(
+      jvmti_env_, art::Thread::Current(), tag);
+}
+
 bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
   if (new_tag == 0) {
     jlong tmp;
@@ -49,6 +78,7 @@
   }
   return JvmtiWeakTable<jlong>::Set(obj, new_tag);
 }
+
 bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
   if (new_tag == 0) {
     jlong tmp;
@@ -60,9 +90,10 @@
 bool ObjectTagTable::DoesHandleNullOnSweep() {
   return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
 }
+
 void ObjectTagTable::HandleNullSweep(jlong tag) {
-  event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(
-      jvmti_env_, art::Thread::Current(), tag);
+  art::MutexLock mu(art::Thread::Current(), lock_);
+  null_tags_.push_back(tag);
 }
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 4181302..ca05a05 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -48,7 +48,18 @@
 class ObjectTagTable final : public JvmtiWeakTable<jlong> {
  public:
   ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
-      : event_handler_(event_handler), jvmti_env_(env) {}
+      : lock_("Object tag table lock", art::LockLevel::kGenericBottomLock),
+        event_handler_(event_handler),
+        jvmti_env_(env) {}
+
+  // Denotes that weak-refs are visible on all threads. Used by semi-space.
+  void Allow() override
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+  // Used by cms and the checkpoint system.
+  void Broadcast(bool broadcast_for_checkpoint) override
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
 
   bool Set(art::mirror::Object* obj, jlong tag) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -77,6 +88,16 @@
   void HandleNullSweep(jlong tag) override;
 
  private:
+  void SendDelayedFreeEvents()
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
+  void SendSingleFreeEvent(jlong tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_, !lock_);
+
+  art::Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+  std::vector<jlong> null_tags_ GUARDED_BY(lock_);
   EventHandler* event_handler_;
   ArtJvmTiEnv* jvmti_env_;
 };
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index f1d6fb0..a8e220c 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -56,6 +56,7 @@
 #include "handle.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jni/jni_internal.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
@@ -73,6 +74,7 @@
 #include "thread_list.h"
 #include "ti_class_definition.h"
 #include "ti_class_loader-inl.h"
+#include "ti_logging.h"
 #include "ti_phase.h"
 #include "ti_redefine.h"
 #include "transform.h"
@@ -113,8 +115,8 @@
   std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
                                                                     checksum,
                                                                     std::move(map),
-                                                                    /*verify*/true,
-                                                                    /*verify_checksum*/true,
+                                                                    /*verify=*/true,
+                                                                    /*verify_checksum=*/true,
                                                                     &error_msg));
   if (dex_file.get() == nullptr) {
     LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
@@ -145,7 +147,7 @@
   FakeJvmtiDeleter() {}
 
   FakeJvmtiDeleter(FakeJvmtiDeleter&) = default;
-  FakeJvmtiDeleter(FakeJvmtiDeleter&&) = default;
+  FakeJvmtiDeleter(FakeJvmtiDeleter&&) noexcept = default;
   FakeJvmtiDeleter& operator=(const FakeJvmtiDeleter&) = default;
 
   template <typename U> void operator()(const U* ptr) const {
@@ -160,9 +162,9 @@
                       art::Handle<art::mirror::Class> klass,
                       art::Handle<art::mirror::ClassLoader> class_loader,
                       const art::DexFile& initial_dex_file,
-                      const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+                      const art::dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                       /*out*/art::DexFile const** final_dex_file,
-                      /*out*/art::DexFile::ClassDef const** final_class_def)
+                      /*out*/art::dex::ClassDef const** final_class_def)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     bool is_enabled =
         event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
@@ -267,7 +269,8 @@
     }
   }
 
-  void ClassLoad(art::Handle<art::mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void ClassLoad(art::Handle<art::mirror::Class> klass) override
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassLoad)) {
       art::Thread* thread = art::Thread::Current();
       ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
@@ -289,7 +292,7 @@
 
   void ClassPrepare(art::Handle<art::mirror::Class> temp_klass,
                     art::Handle<art::mirror::Class> klass)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassPrepare)) {
       art::Thread* thread = art::Thread::Current();
       if (temp_klass.Get() != klass.Get()) {
@@ -931,8 +934,8 @@
     return ERR(ILLEGAL_ARGUMENT);
   } else if (!jnienv->IsInstanceOf(loader,
                                    art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
-    LOG(ERROR) << "GetClassLoaderClassDescriptors is only implemented for BootClassPath and "
-               << "dalvik.system.BaseDexClassLoader class loaders";
+    JVMTI_LOG(ERROR, env) << "GetClassLoaderClassDescriptors is only implemented for "
+                          << "BootClassPath and dalvik.system.BaseDexClassLoader class loaders";
     // TODO Possibly return OK With no classes would  be better since these ones cannot have any
     // real classes associated with them.
     return ERR(NOT_IMPLEMENTED);
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 895e734..20feb78 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -32,6 +32,7 @@
 #include "ti_class_definition.h"
 
 #include "base/array_slice.h"
+#include "base/logging.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "dex/dex_file.h"
@@ -135,7 +136,8 @@
                              const char* descriptor,
                              /*out*/std::vector<unsigned char>* dex_data)
     REQUIRES_SHARED(art::Locks::mutator_lock_) {
-  std::unique_ptr<FixedUpDexFile> fixed_dex_file(FixedUpDexFile::Create(*dex_file, descriptor));
+  std::unique_ptr<FixedUpDexFile> fixed_dex_file(
+      FixedUpDexFile::Create(*dex_file, descriptor));
   dex_data->resize(fixed_dex_file->Size());
   memcpy(dex_data->data(), fixed_dex_file->Begin(), fixed_dex_file->Size());
 }
@@ -246,17 +248,15 @@
     mmap_name += name_;
     std::string error;
     dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
-                                               /* addr */ nullptr,
                                                dequick_size,
                                                PROT_NONE,
-                                               /*low_4gb*/ false,
+                                               /*low_4gb=*/ false,
                                                &error);
     mmap_name += "-TEMP";
     temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
-                                           /* addr */ nullptr,
                                            dequick_size,
                                            PROT_READ | PROT_WRITE,
-                                           /*low_4gb*/ false,
+                                           /*low_4gb=*/ false,
                                            &error);
     if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
       // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
diff --git a/openjdkjvmti/ti_class_loader.cc b/openjdkjvmti/ti_class_loader.cc
index 9a32849..348a1ff 100644
--- a/openjdkjvmti/ti_class_loader.cc
+++ b/openjdkjvmti/ti_class_loader.cc
@@ -48,6 +48,8 @@
 #include "jit/jit_code_cache.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jvmti_allocator.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/array-inl.h"
 #include "mirror/class.h"
 #include "mirror/class_ext.h"
 #include "mirror/object.h"
diff --git a/openjdkjvmti/ti_ddms.cc b/openjdkjvmti/ti_ddms.cc
index bf063fa..9de5cbc 100644
--- a/openjdkjvmti/ti_ddms.cc
+++ b/openjdkjvmti/ti_ddms.cc
@@ -39,6 +39,7 @@
 #include "debugger.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
+#include "ti_logging.h"
 
 namespace openjdkjvmti {
 
@@ -69,7 +70,7 @@
                                 data_arr,
                                 /*out*/reinterpret_cast<uint32_t*>(type_out),
                                 /*out*/&out_data)) {
-    LOG(WARNING) << "Something went wrong with handling the ddm chunk.";
+    JVMTI_LOG(WARNING, env) << "Something went wrong with handling the ddm chunk.";
     return ERR(INTERNAL);
   } else {
     jvmtiError error = OK;
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index c61d6e5..5d39884 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -39,7 +39,9 @@
 #include "ti_class.h"
 #include "ti_ddms.h"
 #include "ti_heap.h"
+#include "ti_logging.h"
 #include "ti_monitor.h"
+
 #include "thread-inl.h"
 
 namespace openjdkjvmti {
@@ -272,6 +274,44 @@
   if (error != ERR(NONE)) {
     return error;
   }
+
+  // GetLastError extension
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(LogUtil::GetLastError),
+      "com.android.art.misc.get_last_error_message",
+      "In some cases the jvmti plugin will log data about errors to the android logcat. These can"
+      " be useful to tools so we make (some) of the messages available here as well. This will"
+      " fill the given 'msg' buffer with the last non-fatal message associated with this"
+      " jvmti-env. Note this is best-effort only, not all log messages will be accessible through"
+      " this API. This will return the last error-message from all threads. Care should be taken"
+      " interpreting the return value when used with a multi-threaded program. The error message"
+      " will only be cleared by a call to 'com.android.art.misc.clear_last_error_message' and will"
+      " not be cleared by intervening successful calls. If no (tracked) error message has been"
+      " sent since the last call to clear_last_error_message this API will return"
+      " JVMTI_ERROR_ABSENT_INFORMATION. Not all failures will cause an error message to be"
+      " recorded.",
+      {
+          { "msg", JVMTI_KIND_ALLOC_BUF, JVMTI_TYPE_CCHAR, false },
+      },
+      {
+        ERR(NULL_POINTER),
+        ERR(ABSENT_INFORMATION),
+      });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
+  // ClearLastError extension
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(LogUtil::ClearLastError),
+      "com.android.art.misc.clear_last_error_message",
+      "Clears the error message returned by 'com.android.art.misc.get_last_error_message'.",
+      { },
+      { });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
   // Copy into output buffer.
 
   *extension_count_ptr = ext_vector.size();
@@ -424,7 +464,7 @@
     }
   }
   return event_handler->SetEvent(art_env,
-                                 /*event_thread*/nullptr,
+                                 /*thread=*/nullptr,
                                  static_cast<ArtJvmtiEvent>(extension_event_index),
                                  mode);
 }
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 6c79a60..01ef4c6 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -981,7 +981,9 @@
           // TODO: We don't have this info.
           if (thread != nullptr) {
             ref_info->jni_local.depth = 0;
-            art::ArtMethod* method = thread->GetCurrentMethod(nullptr, false /* abort_on_error */);
+            art::ArtMethod* method = thread->GetCurrentMethod(nullptr,
+                                                              /* check_suspended= */ true,
+                                                              /* abort_on_error= */ false);
             if (method != nullptr) {
               ref_info->jni_local.method = art::jni::EncodeArtMethod(method);
             }
@@ -1012,7 +1014,7 @@
           ref_info->stack_local.slot = static_cast<jint>(java_info.GetVReg());
           const art::StackVisitor* visitor = java_info.GetVisitor();
           ref_info->stack_local.location =
-              static_cast<jlocation>(visitor->GetDexPc(false /* abort_on_failure */));
+              static_cast<jlocation>(visitor->GetDexPc(/* abort_on_failure= */ false));
           ref_info->stack_local.depth = static_cast<jint>(visitor->GetFrameDepth());
           art::ArtMethod* method = visitor->GetMethod();
           if (method != nullptr) {
@@ -1173,7 +1175,7 @@
     stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_SUPERCLASS,
                                                  nullptr,
                                                  klass,
-                                                 klass->GetSuperClass());
+                                                 klass->GetSuperClass().Ptr());
     if (stop_reports_) {
       return;
     }
@@ -1447,7 +1449,7 @@
 }
 
 jvmtiError HeapUtil::ForceGarbageCollection(jvmtiEnv* env ATTRIBUTE_UNUSED) {
-  art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   return ERR(NONE);
 }
diff --git a/openjdkjvmti/ti_logging.cc b/openjdkjvmti/ti_logging.cc
new file mode 100644
index 0000000..60f4340
--- /dev/null
+++ b/openjdkjvmti/ti_logging.cc
@@ -0,0 +1,72 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_logging.h"
+
+#include "art_jvmti.h"
+
+#include "base/mutex.h"
+#include "base/strlcpy.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+jvmtiError LogUtil::GetLastError(jvmtiEnv* env, char** data) {
+  if (env == nullptr || data == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  }
+  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+  art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+  if (tienv->last_error_.empty()) {
+    return ERR(ABSENT_INFORMATION);
+  }
+  const size_t size = tienv->last_error_.size() + 1;
+  char* out;
+  jvmtiError err = tienv->Allocate(size, reinterpret_cast<unsigned char**>(&out));
+  if (err != OK) {
+    return err;
+  }
+  strlcpy(out, tienv->last_error_.c_str(), size);
+  *data = out;
+  return OK;
+}
+
+jvmtiError LogUtil::ClearLastError(jvmtiEnv* env) {
+  if (env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  }
+  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+  art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+  tienv->last_error_.clear();
+  return OK;
+}
+
+}  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_logging.h b/openjdkjvmti/ti_logging.h
new file mode 100644
index 0000000..a1be090
--- /dev/null
+++ b/openjdkjvmti/ti_logging.h
@@ -0,0 +1,103 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_TI_LOGGING_H_
+#define ART_OPENJDKJVMTI_TI_LOGGING_H_
+
+#include "art_jvmti.h"
+
+#include <ostream>
+#include <sstream>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+
+#include "base/mutex.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+// NB Uses implementation details of android-base/logging.h.
+#define JVMTI_LOG(severity, env)                             \
+  ::openjdkjvmti::JvmtiLogMessage((env),                     \
+                                  __FILE__,                  \
+                                  __LINE__,                  \
+                                  ::android::base::DEFAULT,  \
+                                  SEVERITY_LAMBDA(severity), \
+                                  _LOG_TAG_INTERNAL,         \
+                                  -1)
+
+class JvmtiLogMessage {
+ public:
+  JvmtiLogMessage(jvmtiEnv* env,
+                  const char* file,
+                  unsigned int line,
+                  android::base::LogId id,
+                  android::base::LogSeverity severity,
+                  const char* tag,
+                  int error)
+      : env_(ArtJvmTiEnv::AsArtJvmTiEnv(env)),
+        real_log_(file, line, id, severity, tag, error),
+        real_log_stream_(real_log_.stream()) {
+    DCHECK(env_ != nullptr);
+  }
+
+  ~JvmtiLogMessage() {
+    art::MutexLock mu(art::Thread::Current(), env_->last_error_mutex_);
+    env_->last_error_ = save_stream_.str();
+  }
+
+  template<typename T>
+  JvmtiLogMessage& operator<<(T t) {
+    (real_log_stream_ << t);
+    (save_stream_ << t);
+    return *this;
+  }
+
+ private:
+  ArtJvmTiEnv* env_;
+  android::base::LogMessage real_log_;
+  // Lifetime of real_log_stream_ is lifetime of real_log_.
+  std::ostream& real_log_stream_;
+  std::ostringstream save_stream_;
+
+  DISALLOW_COPY_AND_ASSIGN(JvmtiLogMessage);
+};
+
+class LogUtil {
+ public:
+  static jvmtiError ClearLastError(jvmtiEnv* env);
+  static jvmtiError GetLastError(jvmtiEnv* env, char** data);
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_OPENJDKJVMTI_TI_LOGGING_H_
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 1588df4..e88539f 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -33,6 +33,7 @@
 
 #include <type_traits>
 
+#include "arch/context.h"
 #include "art_jvmti.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
@@ -206,76 +207,59 @@
     return ERR(ABSENT_INFORMATION);
   }
 
-  struct LocalVariableContext {
-    explicit LocalVariableContext(jvmtiEnv* jenv) : env_(jenv), variables_(), err_(OK) {}
+  std::vector<jvmtiLocalVariableEntry> variables;
+  jvmtiError err = OK;
 
-    static void Callback(void* raw_ctx, const art::DexFile::LocalInfo& entry) {
-      reinterpret_cast<LocalVariableContext*>(raw_ctx)->Insert(entry);
+  auto release = [&](jint* out_entry_count_ptr, jvmtiLocalVariableEntry** out_table_ptr) {
+    jlong table_size = sizeof(jvmtiLocalVariableEntry) * variables.size();
+    if (err != OK ||
+        (err = env->Allocate(table_size,
+                              reinterpret_cast<unsigned char**>(out_table_ptr))) != OK) {
+      for (jvmtiLocalVariableEntry& e : variables) {
+        env->Deallocate(reinterpret_cast<unsigned char*>(e.name));
+        env->Deallocate(reinterpret_cast<unsigned char*>(e.signature));
+        env->Deallocate(reinterpret_cast<unsigned char*>(e.generic_signature));
+      }
+      return err;
     }
-
-    void Insert(const art::DexFile::LocalInfo& entry) {
-      if (err_ != OK) {
-        return;
-      }
-      JvmtiUniquePtr<char[]> name_str = CopyString(env_, entry.name_, &err_);
-      if (err_ != OK) {
-        return;
-      }
-      JvmtiUniquePtr<char[]> sig_str = CopyString(env_, entry.descriptor_, &err_);
-      if (err_ != OK) {
-        return;
-      }
-      JvmtiUniquePtr<char[]> generic_sig_str = CopyString(env_, entry.signature_, &err_);
-      if (err_ != OK) {
-        return;
-      }
-      variables_.push_back({
-        .start_location = static_cast<jlocation>(entry.start_address_),
-        .length = static_cast<jint>(entry.end_address_ - entry.start_address_),
-        .name = name_str.release(),
-        .signature = sig_str.release(),
-        .generic_signature = generic_sig_str.release(),
-        .slot = entry.reg_,
-      });
-    }
-
-    jvmtiError Release(jint* out_entry_count_ptr, jvmtiLocalVariableEntry** out_table_ptr) {
-      jlong table_size = sizeof(jvmtiLocalVariableEntry) * variables_.size();
-      if (err_ != OK ||
-          (err_ = env_->Allocate(table_size,
-                                 reinterpret_cast<unsigned char**>(out_table_ptr))) != OK) {
-        Cleanup();
-        return err_;
-      } else {
-        *out_entry_count_ptr = variables_.size();
-        memcpy(*out_table_ptr, variables_.data(), table_size);
-        return OK;
-      }
-    }
-
-    void Cleanup() {
-      for (jvmtiLocalVariableEntry& e : variables_) {
-        env_->Deallocate(reinterpret_cast<unsigned char*>(e.name));
-        env_->Deallocate(reinterpret_cast<unsigned char*>(e.signature));
-        env_->Deallocate(reinterpret_cast<unsigned char*>(e.generic_signature));
-      }
-    }
-
-    jvmtiEnv* env_;
-    std::vector<jvmtiLocalVariableEntry> variables_;
-    jvmtiError err_;
+    *out_entry_count_ptr = variables.size();
+    memcpy(*out_table_ptr, variables.data(), table_size);
+    return OK;
   };
 
-  LocalVariableContext context(env);
+  auto visitor = [&](const art::DexFile::LocalInfo& entry) {
+    if (err != OK) {
+      return;
+    }
+    JvmtiUniquePtr<char[]> name_str = CopyString(env, entry.name_, &err);
+    if (err != OK) {
+      return;
+    }
+    JvmtiUniquePtr<char[]> sig_str = CopyString(env, entry.descriptor_, &err);
+    if (err != OK) {
+      return;
+    }
+    JvmtiUniquePtr<char[]> generic_sig_str = CopyString(env, entry.signature_, &err);
+    if (err != OK) {
+      return;
+    }
+    variables.push_back({
+      .start_location = static_cast<jlocation>(entry.start_address_),
+      .length = static_cast<jint>(entry.end_address_ - entry.start_address_),
+      .name = name_str.release(),
+      .signature = sig_str.release(),
+      .generic_signature = generic_sig_str.release(),
+      .slot = entry.reg_,
+    });
+  };
+
   if (!accessor.DecodeDebugLocalInfo(art_method->IsStatic(),
                                      art_method->GetDexMethodIndex(),
-                                     LocalVariableContext::Callback,
-                                     &context)) {
+                                     visitor)) {
     // Something went wrong with decoding the debug information. It might as well not be there.
     return ERR(ABSENT_INFORMATION);
-  } else {
-    return context.Release(entry_count_ptr, table_ptr);
   }
+  return release(entry_count_ptr, table_ptr);
 }
 
 jvmtiError MethodUtil::GetMaxLocals(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -446,16 +430,6 @@
   return ERR(NONE);
 }
 
-using LineNumberContext = std::vector<jvmtiLineNumberEntry>;
-
-static bool CollectLineNumbers(void* void_context, const art::DexFile::PositionInfo& entry) {
-  LineNumberContext* context = reinterpret_cast<LineNumberContext*>(void_context);
-  jvmtiLineNumberEntry jvmti_entry = { static_cast<jlocation>(entry.address_),
-                                       static_cast<jint>(entry.line_) };
-  context->push_back(jvmti_entry);
-  return false;  // Collect all, no early exit.
-}
-
 jvmtiError MethodUtil::GetLineNumberTable(jvmtiEnv* env,
                                           jmethodID method,
                                           jint* entry_count_ptr,
@@ -486,9 +460,11 @@
     DCHECK(accessor.HasCodeItem()) << art_method->PrettyMethod() << " " << dex_file->GetLocation();
   }
 
-  LineNumberContext context;
-  bool success = dex_file->DecodeDebugPositionInfo(
-      accessor.DebugInfoOffset(), CollectLineNumbers, &context);
+  std::vector<jvmtiLineNumberEntry> context;
+  bool success = accessor.DecodeDebugPositionInfo([&](const art::DexFile::PositionInfo& entry) {
+    context.push_back({static_cast<jlocation>(entry.address_), static_cast<jint>(entry.line_)});
+    return false;
+  });
   if (!success) {
     return ERR(ABSENT_INFORMATION);
   }
@@ -572,7 +548,7 @@
       return;
     }
     bool needs_instrument = !visitor.IsShadowFrame();
-    uint32_t pc = visitor.GetDexPc(/*abort_on_failure*/ false);
+    uint32_t pc = visitor.GetDexPc(/*abort_on_failure=*/ false);
     if (pc == art::dex::kDexNoIndex) {
       // Cannot figure out current PC.
       result_ = ERR(OPAQUE_FRAME);
@@ -622,55 +598,25 @@
     if (!accessor.HasCodeItem()) {
       return ERR(OPAQUE_FRAME);
     }
-
-    struct GetLocalVariableInfoContext {
-      explicit GetLocalVariableInfoContext(jint slot,
-                                          uint32_t pc,
-                                          std::string* out_descriptor,
-                                          art::Primitive::Type* out_type)
-          : found_(false), jslot_(slot), pc_(pc), descriptor_(out_descriptor), type_(out_type) {
-        *descriptor_ = "";
-        *type_ = art::Primitive::kPrimVoid;
+    bool found = false;
+    *type = art::Primitive::kPrimVoid;
+    descriptor->clear();
+    auto visitor = [&](const art::DexFile::LocalInfo& entry) {
+      if (!found &&
+          entry.start_address_ <= dex_pc &&
+          entry.end_address_ > dex_pc &&
+          entry.reg_ == slot_) {
+        found = true;
+        *type = art::Primitive::GetType(entry.descriptor_[0]);
+        *descriptor = entry.descriptor_;
       }
-
-      static void Callback(void* raw_ctx, const art::DexFile::LocalInfo& entry) {
-        reinterpret_cast<GetLocalVariableInfoContext*>(raw_ctx)->Handle(entry);
-      }
-
-      void Handle(const art::DexFile::LocalInfo& entry) {
-        if (found_) {
-          return;
-        } else if (entry.start_address_ <= pc_ &&
-                   entry.end_address_ > pc_ &&
-                   entry.reg_ == jslot_) {
-          found_ = true;
-          *type_ = art::Primitive::GetType(entry.descriptor_[0]);
-          *descriptor_ = entry.descriptor_;
-        }
-        return;
-      }
-
-      bool found_;
-      jint jslot_;
-      uint32_t pc_;
-      std::string* descriptor_;
-      art::Primitive::Type* type_;
     };
-
-    GetLocalVariableInfoContext context(slot_, dex_pc, descriptor, type);
-    if (!dex_file->DecodeDebugLocalInfo(accessor.RegistersSize(),
-                                        accessor.InsSize(),
-                                        accessor.InsnsSizeInCodeUnits(),
-                                        accessor.DebugInfoOffset(),
-                                        method->IsStatic(),
-                                        method->GetDexMethodIndex(),
-                                        GetLocalVariableInfoContext::Callback,
-                                        &context) || !context.found_) {
+    if (!accessor.DecodeDebugLocalInfo(method->IsStatic(), method->GetDexMethodIndex(), visitor) ||
+        !found) {
       // Something went wrong with decoding the debug information. It might as well not be there.
       return ERR(INVALID_SLOT);
-    } else {
-      return OK;
     }
+    return OK;
   }
 
   jvmtiError result_;
@@ -689,7 +635,7 @@
         val_(val),
         obj_val_(nullptr) {}
 
-  virtual jvmtiError GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  jvmtiError GetResult() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (result_ == OK && type_ == art::Primitive::kPrimNot) {
       val_->l = obj_val_.IsNull()
           ? nullptr
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 6d3a37e..aac7233 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -38,6 +38,7 @@
 
 #include "art_jvmti.h"
 #include "gc_root-inl.h"
+#include "mirror/object-inl.h"
 #include "monitor.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
@@ -190,7 +191,7 @@
 
     // Reaquire the mutex/monitor, also go to sleep if we were suspended.
     // TODO Give an extension to wait without suspension as well.
-    MonitorEnter(self, /*suspend*/ true);
+    MonitorEnter(self, /*suspend=*/ true);
     CHECK(owner_.load(std::memory_order_relaxed) == self);
     DCHECK_EQ(1u, count_);
     // Reset the count.
@@ -260,7 +261,7 @@
   JvmtiMonitor* monitor = DecodeMonitor(id);
   art::Thread* self = art::Thread::Current();
 
-  monitor->MonitorEnter(self, /*suspend*/false);
+  monitor->MonitorEnter(self, /*suspend=*/false);
 
   return ERR(NONE);
 }
@@ -273,7 +274,7 @@
   JvmtiMonitor* monitor = DecodeMonitor(id);
   art::Thread* self = art::Thread::Current();
 
-  monitor->MonitorEnter(self, /*suspend*/true);
+  monitor->MonitorEnter(self, /*suspend=*/true);
 
   return ERR(NONE);
 }
@@ -370,7 +371,7 @@
    public:
     GetContendedMonitorClosure() : out_(nullptr) {}
 
-    void Run(art::Thread* target_thread) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    void Run(art::Thread* target_thread) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       art::ScopedAssertNoThreadSuspension sants("GetContendedMonitorClosure::Run");
       switch (target_thread->GetState()) {
         // These three we are actually currently waiting on a monitor and have sent the appropriate
diff --git a/openjdkjvmti/ti_object.cc b/openjdkjvmti/ti_object.cc
index 89ce352..344ae88 100644
--- a/openjdkjvmti/ti_object.cc
+++ b/openjdkjvmti/ti_object.cc
@@ -92,7 +92,7 @@
   {
     art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
     art::ScopedThreadSuspension sts(self, art::kNative);
-    art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend*/false);
+    art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend=*/false);
     art::ObjPtr<art::mirror::Object> target(self->DecodeJObject(obj));
     // This gets the list of threads trying to lock or wait on the monitor.
     art::MonitorInfo info(target.Ptr());
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index db2b143..3d175a8 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -49,6 +49,7 @@
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
 #include "dex/dex_file_types.h"
+#include "dex/signature-inl.h"
 #include "events-inl.h"
 #include "gc/allocation_listener.h"
 #include "gc/heap.h"
@@ -63,9 +64,13 @@
 #include "jni/jni_env_ext-inl.h"
 #include "jvmti_allocator.h"
 #include "linear_alloc.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
 #include "mirror/object.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "non_debuggable_classes.h"
 #include "object_lock.h"
@@ -152,7 +157,7 @@
       const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
       ObsoleteMap* obsolete_maps)
         : StackVisitor(thread,
-                       /*context*/nullptr,
+                       /*context=*/nullptr,
                        StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           allocator_(allocator),
           obsoleted_methods_(obsoleted_methods),
@@ -183,7 +188,9 @@
     if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
       // We cannot ensure that the right dex file is used in inlined frames so we don't support
       // redefining them.
-      DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
+      DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition: "
+                                  << old_method->PrettyMethod() << " is inlined into "
+                                  << GetOuterMethod()->PrettyMethod();
       art::ArtMethod* new_obsolete_method = obsolete_maps_->FindObsoleteVersion(old_method);
       if (new_obsolete_method == nullptr) {
         // Create a new Obsolete Method and put it in the list.
@@ -305,10 +312,9 @@
                                         std::string* error_msg) {
   art::MemMap map = art::MemMap::MapAnonymous(
       StringPrintf("%s-transformed", original_location.c_str()).c_str(),
-      /* addr */ nullptr,
       data.size(),
       PROT_READ|PROT_WRITE,
-      /*low_4gb*/ false,
+      /*low_4gb=*/ false,
       error_msg);
   if (LIKELY(map.IsValid())) {
     memcpy(map.Begin(), data.data(), data.size());
@@ -445,8 +451,8 @@
   std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
                                                                     checksum,
                                                                     std::move(map),
-                                                                    /*verify*/true,
-                                                                    /*verify_checksum*/true,
+                                                                    /*verify=*/true,
+                                                                    /*verify_checksum=*/true,
                                                                     error_msg_));
   if (dex_file.get() == nullptr) {
     os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
@@ -626,7 +632,7 @@
   // and removals. We should have already checked the fields.
   for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
     // Get the data on the method we are searching for
-    const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
+    const art::dex::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
     const char* new_method_name = dex_file_->GetMethodName(new_method_id);
     art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
     art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
@@ -669,7 +675,7 @@
   auto old_iter = old_fields.begin();
   for (const art::ClassAccessor::Field& new_field : new_accessor.GetFields()) {
     // Get the data on the method we are searching for
-    const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
+    const art::dex::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
     const char* new_field_name = dex_file_->GetFieldName(new_field_id);
     const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
 
@@ -682,7 +688,7 @@
       return false;
     }
 
-    const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
+    const art::dex::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
     const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
     const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
 
@@ -733,7 +739,7 @@
   }
   // Get the ClassDef from the new DexFile.
   // Since the dex file has only a single class def the index is always 0.
-  const art::DexFile::ClassDef& def = dex_file_->GetClassDef(0);
+  const art::dex::ClassDef& def = dex_file_->GetClassDef(0);
   // Get the class as it is now.
   art::Handle<art::mirror::Class> current_class(hs.NewHandle(GetMirrorClass()));
 
@@ -770,7 +776,7 @@
       return false;
     }
   }
-  const art::DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(def);
+  const art::dex::TypeList* interfaces = dex_file_->GetInterfacesList(def);
   if (interfaces == nullptr) {
     if (current_class->NumDirectInterfaces() != 0) {
       RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added");
@@ -778,7 +784,7 @@
     }
   } else {
     DCHECK(!current_class->IsProxyClass());
-    const art::DexFile::TypeList* current_interfaces = current_class->GetInterfaceTypeList();
+    const art::dex::TypeList* current_interfaces = current_class->GetInterfaceTypeList();
     if (current_interfaces == nullptr || current_interfaces->Size() != interfaces->Size()) {
       RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added or removed");
       return false;
@@ -1117,10 +1123,10 @@
                                                  dex_file_.get(),
                                                  hs.NewHandle(iter.GetNewDexCache()),
                                                  hs.NewHandle(GetClassLoader()),
-                                                 dex_file_->GetClassDef(0), /*class_def*/
-                                                 nullptr, /*compiler_callbacks*/
-                                                 true, /*allow_soft_failures*/
-                                                 /*log_level*/
+                                                 /*class_def=*/ dex_file_->GetClassDef(0),
+                                                 /*callbacks=*/ nullptr,
+                                                 /*allow_soft_failures=*/ true,
+                                                 /*log_level=*/
                                                  art::verifier::HardFailLogMode::kLogWarning,
                                                  art::Runtime::Current()->GetTargetSdkVersion(),
                                                  &error);
@@ -1288,7 +1294,7 @@
 }
 
 void Redefiner::ClassRedefinition::ReleaseDexFile() {
-  dex_file_.release();
+  dex_file_.release();  // NOLINT b/117926937
 }
 
 void Redefiner::ReleaseAllDexFiles() {
@@ -1367,7 +1373,7 @@
   // TODO We might want to give this its own suspended state!
   // TODO This isn't right. We need to change state without any chance of suspend ideally!
   art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
-  art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+  art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend=*/true);
   for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
     art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
     ClassRedefinition& redef = data.GetRedefinition();
@@ -1391,14 +1397,14 @@
 }
 
 void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
-                                                 const art::DexFile::ClassDef& class_def) {
+                                                 const art::dex::ClassDef& class_def) {
   art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
   art::PointerSize image_pointer_size = linker->GetImagePointerSize();
-  const art::DexFile::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
+  const art::dex::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
   const art::DexFile& old_dex_file = mclass->GetDexFile();
   // Update methods.
   for (art::ArtMethod& method : mclass->GetDeclaredMethods(image_pointer_size)) {
-    const art::DexFile::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
+    const art::dex::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
     art::dex::TypeIndex method_return_idx =
         dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(method.GetReturnTypeDescriptor()));
     const auto* old_type_list = method.GetParameterTypeList();
@@ -1411,12 +1417,11 @@
                       old_dex_file.GetTypeId(
                           old_type_list->GetTypeItem(i).type_idx_)))));
     }
-    const art::DexFile::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx,
-                                                                   new_type_list);
+    const art::dex::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx, new_type_list);
     CHECK(proto_id != nullptr || old_type_list == nullptr);
-    const art::DexFile::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
-                                                                      *new_name_id,
-                                                                      *proto_id);
+    const art::dex::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
+                                                                  *new_name_id,
+                                                                  *proto_id);
     CHECK(method_id != nullptr);
     uint32_t dex_method_idx = dex_file_->GetIndexForMethodId(*method_id);
     method.SetDexMethodIndex(dex_method_idx);
@@ -1432,12 +1437,12 @@
   for (auto fields_iter : {mclass->GetIFields(), mclass->GetSFields()}) {
     for (art::ArtField& field : fields_iter) {
       std::string declaring_class_name;
-      const art::DexFile::TypeId* new_declaring_id =
+      const art::dex::TypeId* new_declaring_id =
           dex_file_->FindTypeId(field.GetDeclaringClass()->GetDescriptor(&declaring_class_name));
-      const art::DexFile::StringId* new_name_id = dex_file_->FindStringId(field.GetName());
-      const art::DexFile::TypeId* new_type_id = dex_file_->FindTypeId(field.GetTypeDescriptor());
+      const art::dex::StringId* new_name_id = dex_file_->FindStringId(field.GetName());
+      const art::dex::TypeId* new_type_id = dex_file_->FindTypeId(field.GetTypeDescriptor());
       CHECK(new_name_id != nullptr && new_type_id != nullptr && new_declaring_id != nullptr);
-      const art::DexFile::FieldId* new_field_id =
+      const art::dex::FieldId* new_field_id =
           dex_file_->FindFieldId(*new_declaring_id, *new_name_id, *new_type_id);
       CHECK(new_field_id != nullptr);
       // We only need to update the index since the other data in the ArtField cannot be updated.
@@ -1452,19 +1457,29 @@
     art::ObjPtr<art::mirror::DexCache> new_dex_cache,
     art::ObjPtr<art::mirror::Object> original_dex_file) {
   DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
-  const art::DexFile::ClassDef& class_def = dex_file_->GetClassDef(0);
+  const art::dex::ClassDef& class_def = dex_file_->GetClassDef(0);
   UpdateMethods(mclass, class_def);
   UpdateFields(mclass);
 
+  art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
+  CHECK(!ext.IsNull());
+  ext->SetOriginalDexFile(original_dex_file);
+
+  // If this is the first time the class is being redefined, store
+  // the native DexFile pointer and initial ClassDef index in ClassExt.
+  // This preserves the pointer for hiddenapi access checks which need
+  // to read access flags from the initial DexFile.
+  if (ext->GetPreRedefineDexFile() == nullptr) {
+    ext->SetPreRedefineDexFile(&mclass->GetDexFile());
+    ext->SetPreRedefineClassDefIndex(mclass->GetDexClassDefIndex());
+  }
+
   // Update the class fields.
   // Need to update class last since the ArtMethod gets its DexFile from the class (which is needed
   // to call GetReturnTypeDescriptor and GetParameterTypeList above).
   mclass->SetDexCache(new_dex_cache.Ptr());
   mclass->SetDexClassDefIndex(dex_file_->GetIndexForClassDef(class_def));
   mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
-  art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
-  CHECK(!ext.IsNull());
-  ext->SetOriginalDexFile(original_dex_file);
 
   // Notify the jit that all the methods in this class were redefined. Need to do this last since
   // the jit relies on the dex_file_ being correct (for native methods at least) to find the method
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index f4a4280..a974dc1 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -39,13 +39,19 @@
 #include "art_jvmti.h"
 #include "base/array_ref.h"
 #include "base/globals.h"
-#include "dex/dex_file.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jvmti.h"
 #include "mirror/array.h"
 #include "mirror/class.h"
 #include "obj_ptr.h"
 
+namespace art {
+namespace dex {
+struct ClassDef;
+}  // namespace dex
+class DexFile;
+}  // namespace art
+
 namespace openjdkjvmti {
 
 class ArtClassDefinition;
@@ -172,7 +178,7 @@
         REQUIRES(art::Locks::mutator_lock_);
 
     void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
-                       const art::DexFile::ClassDef& class_def)
+                       const art::dex::ClassDef& class_def)
         REQUIRES(art::Locks::mutator_lock_);
 
     void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 1189b1d..2187825 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -52,6 +52,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
+#include "ti_logging.h"
 #include "ti_phase.h"
 #include "well_known_classes.h"
 
@@ -213,7 +214,7 @@
   runtime->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gSearchCallback);
 }
 
-jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_UNUSED,
+jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env,
                                                        const char* segment) {
   art::Runtime* current = art::Runtime::Current();
   if (current == nullptr) {
@@ -229,9 +230,14 @@
   std::string error_msg;
   std::vector<std::unique_ptr<const art::DexFile>> dex_files;
   const art::ArtDexFileLoader dex_file_loader;
-  if (!dex_file_loader.Open(
-        segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
-    LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
+  if (!dex_file_loader.Open(segment,
+                            segment,
+                            /* verify= */ true,
+                            /* verify_checksum= */ true,
+                            &error_msg,
+                            &dex_files)) {
+    JVMTI_LOG(WARNING, env) << "Could not open " << segment << " for boot classpath extension: "
+                            << error_msg;
     return ERR(ILLEGAL_ARGUMENT);
   }
 
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index b6969af..385ac45 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -36,6 +36,7 @@
 #include <unordered_map>
 #include <vector>
 
+#include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "art_jvmti.h"
@@ -57,6 +58,7 @@
 #include "nativehelper/scoped_local_ref.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
+#include "ti_logging.h"
 #include "ti_thread.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
@@ -77,9 +79,9 @@
         start(start_),
         stop(stop_) {}
   GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
-  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
+  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
 
-  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       return true;
@@ -112,6 +114,23 @@
   size_t stop;
 };
 
+art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
+  art::ShadowFrame* cur = GetCurrentShadowFrame();
+  if (cur == nullptr) {
+    *created_frame = true;
+    art::ArtMethod* method = GetMethod();
+    const uint16_t num_regs = method->DexInstructionData().RegistersSize();
+    cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
+                                                       num_regs,
+                                                       method,
+                                                       GetDexPc());
+    DCHECK(cur != nullptr);
+  } else {
+    *created_frame = false;
+  }
+  return cur;
+}
+
 template <typename FrameFn>
 GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                     size_t start,
@@ -133,7 +152,7 @@
       frames.push_back(info);
     };
     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
-    visitor.WalkStack(/* include_transitions */ false);
+    visitor.WalkStack(/* include_transitions= */ false);
 
     start_result = visitor.start;
     stop_result = visitor.stop;
@@ -201,7 +220,7 @@
       ++index;
     };
     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
-    visitor.WalkStack(/* include_transitions */ false);
+    visitor.WalkStack(/* include_transitions= */ false);
   }
 
   jvmtiFrameInfo* frame_buffer;
@@ -313,7 +332,7 @@
       thread_frames->push_back(info);
     };
     auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
-    visitor.WalkStack(/* include_transitions */ false);
+    visitor.WalkStack(/* include_transitions= */ false);
   }
 
   art::Barrier barrier;
@@ -655,34 +674,24 @@
   return ERR(NONE);
 }
 
-// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
-// runtime methods and transitions must not be counted.
-struct GetFrameCountVisitor : public art::StackVisitor {
-  explicit GetFrameCountVisitor(art::Thread* thread)
-      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        count(0) {}
-
-  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    art::ArtMethod* m = GetMethod();
-    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
-    if (do_count) {
-      count++;
-    }
-    return true;
-  }
-
-  size_t count;
-};
-
 struct GetFrameCountClosure : public art::Closure {
  public:
   GetFrameCountClosure() : count(0) {}
 
   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    GetFrameCountVisitor visitor(self);
-    visitor.WalkStack(false);
-
-    count = visitor.count;
+    // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
+    // counted.
+    art::StackVisitor::WalkStack(
+        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+          art::ArtMethod* m = stack_visitor->GetMethod();
+          if (m != nullptr && !m->IsRuntimeMethod()) {
+            count++;
+          }
+          return true;
+        },
+        self,
+        /* context= */ nullptr,
+        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
   }
 
   size_t count;
@@ -725,46 +734,30 @@
   return ERR(NONE);
 }
 
-// Walks up the stack 'n' callers, when used with Thread::WalkStack.
-struct GetLocationVisitor : public art::StackVisitor {
-  GetLocationVisitor(art::Thread* thread, size_t n_in)
-      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        n(n_in),
-        count(0),
-        caller(nullptr),
-        caller_dex_pc(0) {}
-
-  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    art::ArtMethod* m = GetMethod();
-    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
-    if (do_count) {
-      DCHECK(caller == nullptr);
-      if (count == n) {
-        caller = m;
-        caller_dex_pc = GetDexPc(false);
-        return false;
-      }
-      count++;
-    }
-    return true;
-  }
-
-  const size_t n;
-  size_t count;
-  art::ArtMethod* caller;
-  uint32_t caller_dex_pc;
-};
-
 struct GetLocationClosure : public art::Closure {
  public:
   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
 
   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    GetLocationVisitor visitor(self, n);
-    visitor.WalkStack(false);
-
-    method = visitor.caller;
-    dex_pc = visitor.caller_dex_pc;
+    // Walks up the stack 'n' callers.
+    size_t count = 0u;
+    art::StackVisitor::WalkStack(
+        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+          art::ArtMethod* m = stack_visitor->GetMethod();
+          if (m != nullptr && !m->IsRuntimeMethod()) {
+            DCHECK(method == nullptr);
+            if (count == n) {
+              method = m;
+              dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
+              return false;
+            }
+            count++;
+          }
+          return true;
+        },
+        self,
+        /* context= */ nullptr,
+        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
   }
 
   const size_t n;
@@ -893,7 +886,7 @@
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     // Find the monitors on the stack.
     MonitorVisitor visitor(target);
-    visitor.WalkStack(/* include_transitions */ false);
+    visitor.WalkStack(/* include_transitions= */ false);
     // Find any other monitors, including ones acquired in native code.
     art::RootInfo root_info(art::kRootVMInternal);
     target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
@@ -1065,16 +1058,7 @@
     // From here we are sure to succeed.
     bool needs_instrument = false;
     // Get/create a shadow frame
-    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
-    if (shadow_frame == nullptr) {
-      needs_instrument = true;
-      const size_t frame_id = visitor.GetFrameId();
-      const uint16_t num_regs = method->DexInstructionData().RegistersSize();
-      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
-                                                             num_regs,
-                                                             method,
-                                                             visitor.GetDexPc());
-    }
+    art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
     {
       art::WriterMutexLock lk(self, tienv->event_info_mutex_);
       // Mark shadow frame as needs_notify_pop_
@@ -1089,4 +1073,89 @@
   } while (true);
 }
 
+jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
+  art::Thread* self = art::Thread::Current();
+  art::Thread* target;
+  do {
+    ThreadUtil::SuspendCheck(self);
+    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
+    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
+    // user-code suspension. We retry and do another SuspendCheck to clear this.
+    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
+      continue;
+    }
+    // From now on we know we cannot get suspended by user-code.
+    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+    // have the 'suspend_lock' locked here.
+    art::ScopedObjectAccess soa(self);
+    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+    jvmtiError err = ERR(INTERNAL);
+    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+      return err;
+    }
+    {
+      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
+      if (target == self || target->GetUserCodeSuspendCount() == 0) {
+        // We cannot be the current thread for this function.
+        return ERR(THREAD_NOT_SUSPENDED);
+      }
+    }
+    JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
+    constexpr art::StackVisitor::StackWalkKind kWalkKind =
+        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
+    if (tls_data != nullptr &&
+        tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
+        tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
+                                                                                 kWalkKind)) {
+      JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
+                              << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
+                              << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
+                              << "more information.";
+      return ERR(OPAQUE_FRAME);
+    }
+    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
+    // done.
+    std::unique_ptr<art::Context> context(art::Context::Create());
+    FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
+    FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
+    final_frame.WalkStack();
+    penultimate_frame.WalkStack();
+
+    if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
+      // Cannot do it if there is only one frame!
+      return ERR(NO_MORE_FRAMES);
+    }
+
+    art::ArtMethod* called_method = final_frame.GetMethod();
+    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
+    if (calling_method->IsNative() || called_method->IsNative()) {
+      return ERR(OPAQUE_FRAME);
+    }
+    // From here we are sure to succeed.
+
+    // Get/create a shadow frame
+    bool created_final_frame = false;
+    bool created_penultimate_frame = false;
+    art::ShadowFrame* called_shadow_frame =
+        final_frame.GetOrCreateShadowFrame(&created_final_frame);
+    art::ShadowFrame* calling_shadow_frame =
+        penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);
+
+    CHECK_NE(called_shadow_frame, calling_shadow_frame)
+        << "Frames at different depths not different!";
+
+    // Tell the shadow-frame to return immediately and skip all exit events.
+    called_shadow_frame->SetForcePopFrame(true);
+    calling_shadow_frame->SetForceRetryInstruction(true);
+
+    // Make sure we will go to the interpreter and use the shadow frames. The early return for
+    // the final frame will force everything to the interpreter so we only need to instrument if it
+    // was not present.
+    if (created_final_frame) {
+      DeoptManager::Get()->DeoptimizeThread(target);
+    }
+    return OK;
+  } while (true);
+}
+
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.h b/openjdkjvmti/ti_stack.h
index b41fa4b..55c4269 100644
--- a/openjdkjvmti/ti_stack.h
+++ b/openjdkjvmti/ti_stack.h
@@ -81,6 +81,8 @@
                                         jobject** owned_monitors_ptr);
 
   static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth);
+
+  static jvmtiError PopFrame(jvmtiEnv* env, jthread thread);
 };
 
 struct FindFrameAtDepthVisitor : art::StackVisitor {
@@ -110,6 +112,9 @@
     }
   }
 
+  art::ShadowFrame* GetOrCreateShadowFrame(/*out*/bool* created_frame)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
+
  private:
   bool found_frame_;
   size_t cnt_;
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index e533094..051db4c 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -91,7 +91,8 @@
         self->GetThreadName(name);
         if (name != "JDWP" &&
             name != "Signal Catcher" &&
-            !android::base::StartsWith(name, "Jit thread pool")) {
+            !android::base::StartsWith(name, "Jit thread pool") &&
+            !android::base::StartsWith(name, "Runtime worker thread")) {
           LOG(FATAL) << "Unexpected thread before start: " << name << " id: "
                      << self->GetThreadId();
         }
@@ -623,18 +624,10 @@
   return ERR(NONE);
 }
 
-// The struct that we store in the art::Thread::custom_tls_ that maps the jvmtiEnvs to the data
-// stored with that thread. This is needed since different jvmtiEnvs are not supposed to share TLS
-// data but we only have a single slot in Thread objects to store data.
-struct JvmtiGlobalTLSData : public art::TLSData {
-  std::unordered_map<jvmtiEnv*, const void*> data GUARDED_BY(art::Locks::thread_list_lock_);
-};
-
 static void RemoveTLSData(art::Thread* target, void* ctx) REQUIRES(art::Locks::thread_list_lock_) {
   jvmtiEnv* env = reinterpret_cast<jvmtiEnv*>(ctx);
   art::Locks::thread_list_lock_->AssertHeld(art::Thread::Current());
-  JvmtiGlobalTLSData* global_tls =
-      reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
+  JvmtiGlobalTLSData* global_tls = ThreadUtil::GetGlobalTLSData(target);
   if (global_tls != nullptr) {
     global_tls->data.erase(env);
   }
@@ -657,19 +650,27 @@
     return err;
   }
 
-  JvmtiGlobalTLSData* global_tls =
-      reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
-  if (global_tls == nullptr) {
-    // Synchronized using thread_list_lock_ to prevent racing sets.
-    target->SetCustomTLS(kJvmtiTlsKey, new JvmtiGlobalTLSData);
-    global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
-  }
+  JvmtiGlobalTLSData* global_tls = GetOrCreateGlobalTLSData(target);
 
   global_tls->data[env] = data;
 
   return ERR(NONE);
 }
 
+JvmtiGlobalTLSData* ThreadUtil::GetOrCreateGlobalTLSData(art::Thread* thread) {
+  JvmtiGlobalTLSData* data = GetGlobalTLSData(thread);
+  if (data != nullptr) {
+    return data;
+  } else {
+    thread->SetCustomTLS(kJvmtiTlsKey, new JvmtiGlobalTLSData);
+    return GetGlobalTLSData(thread);
+  }
+}
+
+JvmtiGlobalTLSData* ThreadUtil::GetGlobalTLSData(art::Thread* thread) {
+  return reinterpret_cast<JvmtiGlobalTLSData*>(thread->GetCustomTLS(kJvmtiTlsKey));
+}
+
 jvmtiError ThreadUtil::GetThreadLocalStorage(jvmtiEnv* env,
                                              jthread thread,
                                              void** data_ptr) {
@@ -686,8 +687,7 @@
     return err;
   }
 
-  JvmtiGlobalTLSData* global_tls =
-      reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
+  JvmtiGlobalTLSData* global_tls = GetGlobalTLSData(target);
   if (global_tls == nullptr) {
     *data_ptr = nullptr;
     return OK;
@@ -813,42 +813,11 @@
     runtime->EndThreadBirth();
     return ERR(INTERNAL);
   }
-  data.release();
+  data.release();  // NOLINT pthreads API.
 
   return ERR(NONE);
 }
 
-class ScopedSuspendByPeer {
- public:
-  explicit ScopedSuspendByPeer(jthread jtarget)
-      : thread_list_(art::Runtime::Current()->GetThreadList()),
-        timeout_(false),
-        target_(thread_list_->SuspendThreadByPeer(jtarget,
-                                                  /* suspend_thread */ true,
-                                                  art::SuspendReason::kInternal,
-                                                  &timeout_)) { }
-  ~ScopedSuspendByPeer() {
-    if (target_ != nullptr) {
-      if (!thread_list_->Resume(target_, art::SuspendReason::kInternal)) {
-        LOG(ERROR) << "Failed to resume " << target_ << "!";
-      }
-    }
-  }
-
-  art::Thread* GetTargetThread() const {
-    return target_;
-  }
-
-  bool TimedOut() const {
-    return timeout_;
-  }
-
- private:
-  art::ThreadList* thread_list_;
-  bool timeout_;
-  art::Thread* target_;
-};
-
 jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
                                     jthread target_jthread) {
   // Loop since we need to bail out and try again if we would end up getting suspended while holding
@@ -876,27 +845,29 @@
       if (!GetAliveNativeThread(target_jthread, soa, &target, &err)) {
         return err;
       }
+      art::ThreadState state = target->GetState();
+      if (state == art::ThreadState::kStarting || target->IsStillStarting()) {
+        return ERR(THREAD_NOT_ALIVE);
+      } else {
+        art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+        if (target->GetUserCodeSuspendCount() != 0) {
+          return ERR(THREAD_SUSPENDED);
+        }
+      }
     }
-    // Get the actual thread in a suspended state so we can change the user-code suspend count.
-    ScopedSuspendByPeer ssbp(target_jthread);
-    if (ssbp.GetTargetThread() == nullptr && !ssbp.TimedOut()) {
+    bool timeout = true;
+    art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
+        target_jthread,
+        /* request_suspension= */ true,
+        art::SuspendReason::kForUserCode,
+        &timeout);
+    if (ret_target == nullptr && !timeout) {
       // TODO It would be good to get more information about why exactly the thread failed to
       // suspend.
       return ERR(INTERNAL);
-    } else if (!ssbp.TimedOut()) {
-      art::ThreadState state = ssbp.GetTargetThread()->GetState();
-      if (state == art::ThreadState::kStarting || ssbp.GetTargetThread()->IsStillStarting()) {
-        return ERR(THREAD_NOT_ALIVE);
-      }
-      // we didn't time out and got a result. Suspend the thread by usercode and return. It's
-      // already suspended internal so we don't need to do anything but increment the count.
-      art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
-      if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() != 0) {
-        return ERR(THREAD_SUSPENDED);
-      }
-      bool res = ssbp.GetTargetThread()->ModifySuspendCount(
-          self, +1, nullptr, art::SuspendReason::kForUserCode);
-      return res ? OK : ERR(INTERNAL);
+    } else if (!timeout) {
+      // We didn't time out and got a result.
+      return OK;
     }
     // We timed out. Just go around and try again.
   } while (true);
@@ -905,17 +876,6 @@
 
 jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
   CHECK(self == art::Thread::Current());
-  if (!self->CanBeSuspendedByUserCode()) {
-    // TODO This is really undesirable. As far as I can tell this is can only come about because of
-    // class-loads in the jit-threads (through either VMObjectAlloc or the ClassLoad/ClassPrepare
-    // events that we send). It's unlikely that anyone would be suspending themselves there since
-    // it's almost guaranteed to cause a deadlock but it is technically allowed. Ideally we'd want
-    // to put a CHECK here (or in the event-dispatch code) that we are only in this situation when
-    // sending the GC callbacks but the jit causing events means we cannot do this.
-    LOG(WARNING) << "Attempt to self-suspend on a thread without suspension enabled. Thread is "
-                 << *self;
-    return ERR(INTERNAL);
-  }
   {
     art::MutexLock mu(self, *art::Locks::user_code_suspension_lock_);
     art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
@@ -963,6 +923,7 @@
     return ERR(NULL_POINTER);
   }
   art::Thread* self = art::Thread::Current();
+  art::Thread* target;
   // Retry until we know we won't get suspended by user code while resuming something.
   do {
     SuspendCheck(self);
@@ -973,37 +934,36 @@
       continue;
     }
     // From now on we know we cannot get suspended by user-code.
-    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
-    // have the 'suspend_lock' locked here.
-    art::ScopedObjectAccess soa(self);
-    if (thread == nullptr) {
-      // The thread is the current thread.
-      return ERR(THREAD_NOT_SUSPENDED);
-    } else if (!soa.Env()->IsInstanceOf(thread, art::WellKnownClasses::java_lang_Thread)) {
-      // Not a thread object.
-      return ERR(INVALID_THREAD);
-    } else if (self->GetPeer() == soa.Decode<art::mirror::Object>(thread)) {
-      // The thread is the current thread.
-      return ERR(THREAD_NOT_SUSPENDED);
+    {
+      // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+      // have the 'suspend_lock' locked here.
+      art::ScopedObjectAccess soa(self);
+      art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+      jvmtiError err = ERR(INTERNAL);
+      if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+        return err;
+      } else if (target == self) {
+        // We would have paused until we aren't suspended anymore due to the ScopedObjectAccess so
+        // we can just return THREAD_NOT_SUSPENDED. Unfortunately we cannot do any real DCHECKs
+        // about current state since it's all concurrent.
+        return ERR(THREAD_NOT_SUSPENDED);
+      }
+      // The JVMTI spec requires us to return THREAD_NOT_SUSPENDED if it is alive but we really
+      // cannot tell why resume failed.
+      {
+        art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+        if (target->GetUserCodeSuspendCount() == 0) {
+          return ERR(THREAD_NOT_SUSPENDED);
+        }
+      }
     }
-    ScopedSuspendByPeer ssbp(thread);
-    if (ssbp.TimedOut()) {
-      // Unknown error. Couldn't suspend thread!
-      return ERR(INTERNAL);
-    } else if (ssbp.GetTargetThread() == nullptr) {
-      // Thread must not be alive.
-      return ERR(THREAD_NOT_ALIVE);
-    }
-    // We didn't time out and got a result. Check the thread is suspended by usercode, unsuspend it
-    // and return. It's already suspended internal so we don't need to do anything but decrement the
-    // count.
-    art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
-    if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() == 0) {
-      return ERR(THREAD_NOT_SUSPENDED);
-    } else if (!ssbp.GetTargetThread()->ModifySuspendCount(
-        self, -1, nullptr, art::SuspendReason::kForUserCode)) {
+    // It is okay that we don't have a thread_list_lock here since we know that the thread cannot
+    // die since it is currently held suspended by a SuspendReason::kForUserCode suspend.
+    DCHECK(target != self);
+    if (!art::Runtime::Current()->GetThreadList()->Resume(target,
+                                                          art::SuspendReason::kForUserCode)) {
       // TODO Give a better error.
-      // This should not really be possible and is probably some race.
+      // This is most likely THREAD_NOT_SUSPENDED but we cannot really be sure.
       return ERR(INTERNAL);
     } else {
       return OK;
@@ -1110,7 +1070,7 @@
    public:
     explicit StopThreadClosure(art::Handle<art::mirror::Throwable> except) : exception_(except) { }
 
-    void Run(art::Thread* me) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    void Run(art::Thread* me) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       // Make sure the thread is prepared to notice the exception.
       art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(me);
       me->SetAsyncException(exception_.Get());
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index c6b6af1..39f1f07 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -32,11 +32,14 @@
 #ifndef ART_OPENJDKJVMTI_TI_THREAD_H_
 #define ART_OPENJDKJVMTI_TI_THREAD_H_
 
+#include <unordered_map>
+
 #include "jni.h"
 #include "jvmti.h"
 
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "thread.h"
 
 namespace art {
 class ArtField;
@@ -49,6 +52,18 @@
 
 class EventHandler;
 
+// The struct that we store in the art::Thread::custom_tls_ that maps the jvmtiEnvs to the data
+// stored with that thread. This is needed since different jvmtiEnvs are not supposed to share TLS
+// data but we only have a single slot in Thread objects to store data.
+struct JvmtiGlobalTLSData : public art::TLSData {
+  std::unordered_map<jvmtiEnv*, const void*> data GUARDED_BY(art::Locks::thread_list_lock_);
+
+  // The depth of the last frame where popping using PopFrame is not allowed. It is set to
+  // kNoDisallowedPopFrame if all frames can be popped. See b/117615146 for more information.
+  static constexpr size_t kNoDisallowedPopFrame = -1;
+  size_t disable_pop_frame_depth = kNoDisallowedPopFrame;
+};
+
 class ThreadUtil {
  public:
   static void Register(EventHandler* event_handler);
@@ -134,6 +149,11 @@
     REQUIRES(!art::Locks::user_code_suspension_lock_,
              !art::Locks::thread_suspend_count_lock_);
 
+  static JvmtiGlobalTLSData* GetGlobalTLSData(art::Thread* thread)
+      REQUIRES(art::Locks::thread_list_lock_);
+  static JvmtiGlobalTLSData* GetOrCreateGlobalTLSData(art::Thread* thread)
+      REQUIRES(art::Locks::thread_list_lock_);
+
  private:
   // We need to make sure only one thread tries to suspend threads at a time so we can get the
   // 'suspend-only-once' behavior the spec requires. Internally, ART considers suspension to be a
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index d87ca56..27f04b7 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -40,6 +40,7 @@
 #include "art_method.h"
 #include "base/array_ref.h"
 #include "base/globals.h"
+#include "base/logging.h"
 #include "base/mem_map.h"
 #include "class_linker.h"
 #include "dex/dex_file.h"
@@ -48,6 +49,7 @@
 #include "events-inl.h"
 #include "fault_handler.h"
 #include "gc_root-inl.h"
+#include "handle_scope-inl.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jvalue.h"
 #include "jvmti.h"
@@ -76,7 +78,7 @@
                                               art::LockLevel::kSignalHandlingLock),
         class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
                                            uninitialized_class_definitions_lock_) {
-    manager->AddHandler(this, /* generated_code */ false);
+    manager->AddHandler(this, /* generated_code= */ false);
   }
 
   ~TransformationFaultHandler() {
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
deleted file mode 100644
index 13c8f47..0000000
--- a/patchoat/Android.bp
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-cc_defaults {
-    name: "patchoat-defaults",
-    host_supported: true,
-    defaults: ["art_defaults"],
-    srcs: ["patchoat.cc"],
-    target: {
-        android: {
-            compile_multilib: "prefer32",
-        },
-    },
-    shared_libs: [
-        "libartbase",
-        "libbase",
-        "libcrypto", // For computing the digest of image file
-    ],
-}
-
-art_cc_binary {
-    name: "patchoat",
-    defaults: ["patchoat-defaults"],
-    shared_libs: [
-        "libart",
-    ],
-}
-
-art_cc_binary {
-    name: "patchoatd",
-    defaults: [
-        "art_debug_defaults",
-        "patchoat-defaults",
-    ],
-    shared_libs: [
-        "libartd",
-    ],
-}
-
-art_cc_test {
-    name: "art_patchoat_tests",
-    defaults: [
-        "art_gtest_defaults",
-    ],
-    srcs: [
-        "patchoat_test.cc",
-    ],
-    shared_libs: [
-        "libcrypto", // For computing the digest of image file
-    ],
-}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
deleted file mode 100644
index aaa3e83..0000000
--- a/patchoat/patchoat.cc
+++ /dev/null
@@ -1,1323 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "patchoat.h"
-
-#include <openssl/sha.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/file.h"
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "art_field-inl.h"
-#include "art_method-inl.h"
-#include "base/bit_memory_region.h"
-#include "base/dumpable.h"
-#include "base/file_utils.h"
-#include "base/leb128.h"
-#include "base/logging.h"  // For InitLogging.
-#include "base/mutex.h"
-#include "base/memory_region.h"
-#include "base/memory_tool.h"
-#include "base/os.h"
-#include "base/scoped_flock.h"
-#include "base/stringpiece.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/utils.h"
-#include "class_root.h"
-#include "elf_file.h"
-#include "elf_file_impl.h"
-#include "elf_utils.h"
-#include "gc/space/image_space.h"
-#include "image-inl.h"
-#include "intern_table.h"
-#include "mirror/dex_cache.h"
-#include "mirror/executable.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "mirror/object-refvisitor-inl.h"
-#include "mirror/reference.h"
-#include "noop_compiler_callbacks.h"
-#include "offsets.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-namespace {
-
-static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
-  uint64_t off = 0;
-  if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
-    return nullptr;
-  }
-
-  OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + off);
-  return oat_header;
-}
-
-static File* CreateOrOpen(const char* name) {
-  if (OS::FileExists(name)) {
-    return OS::OpenFileReadWrite(name);
-  } else {
-    std::unique_ptr<File> f(OS::CreateEmptyFile(name));
-    if (f.get() != nullptr) {
-      if (fchmod(f->Fd(), 0644) != 0) {
-        PLOG(ERROR) << "Unable to make " << name << " world readable";
-        unlink(name);
-        return nullptr;
-      }
-    }
-    return f.release();
-  }
-}
-
-// Either try to close the file (close=true), or erase it.
-static bool FinishFile(File* file, bool close) {
-  if (close) {
-    if (file->FlushCloseOrErase() != 0) {
-      PLOG(ERROR) << "Failed to flush and close file.";
-      return false;
-    }
-    return true;
-  } else {
-    file->Erase();
-    return false;
-  }
-}
-
-static bool SymlinkFile(const std::string& input_filename, const std::string& output_filename) {
-  if (input_filename == output_filename) {
-    // Input and output are the same, nothing to do.
-    return true;
-  }
-
-  // Unlink the original filename, since we are overwriting it.
-  unlink(output_filename.c_str());
-
-  // Create a symlink from the source file to the target path.
-  if (symlink(input_filename.c_str(), output_filename.c_str()) < 0) {
-    PLOG(ERROR) << "Failed to create symlink " << output_filename << " -> " << input_filename;
-    return false;
-  }
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Created symlink " << output_filename << " -> " << input_filename;
-  }
-
-  return true;
-}
-
-// Holder class for runtime options and related objects.
-class PatchoatRuntimeOptionsHolder {
- public:
-  PatchoatRuntimeOptionsHolder(const std::string& image_location, InstructionSet isa) {
-    options_.push_back(std::make_pair("compilercallbacks", &callbacks_));
-    img_ = "-Ximage:" + image_location;
-    options_.push_back(std::make_pair(img_.c_str(), nullptr));
-    isa_name_ = GetInstructionSetString(isa);
-    options_.push_back(std::make_pair("imageinstructionset",
-                                      reinterpret_cast<const void*>(isa_name_.c_str())));
-    options_.push_back(std::make_pair("-Xno-sig-chain", nullptr));
-    // We do not want the runtime to attempt to patch the image.
-    options_.push_back(std::make_pair("-Xnorelocate", nullptr));
-    // Don't try to compile.
-    options_.push_back(std::make_pair("-Xnoimage-dex2oat", nullptr));
-    // Do not accept broken image.
-    options_.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
-  }
-
-  const RuntimeOptions& GetRuntimeOptions() {
-    return options_;
-  }
-
- private:
-  RuntimeOptions options_;
-  NoopCompilerCallbacks callbacks_;
-  std::string isa_name_;
-  std::string img_;
-};
-
-}  // namespace
-
-bool PatchOat::GeneratePatch(
-    const MemMap& original,
-    const MemMap& relocated,
-    std::vector<uint8_t>* output,
-    std::string* error_msg) {
-  // FORMAT of the patch (aka image relocation) file:
-  // * SHA-256 digest (32 bytes) of original/unrelocated file (e.g., the one from /system)
-  // * List of monotonically increasing offsets (max value defined by uint32_t) at which relocations
-  //   occur.
-  //   Each element is represented as the delta from the previous offset in the list (first element
-  //   is a delta from 0). Each delta is encoded using unsigned LEB128: little-endian
-  //   variable-length 7 bits per byte encoding, where all bytes have the highest bit (0x80) set
-  //   except for the final byte which does not have that bit set. For example, 0x3f is offset 0x3f,
-  //   whereas 0xbf 0x05 is offset (0x3f & 0x7f) | (0x5 << 7) which is 0x2bf. Most deltas end up
-  //   being encoding using just one byte, achieving ~4x decrease in relocation file size compared
-  //   to the encoding where offsets are stored verbatim, as uint32_t.
-
-  size_t original_size = original.Size();
-  size_t relocated_size = relocated.Size();
-  if (original_size != relocated_size) {
-    *error_msg =
-        StringPrintf(
-            "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
-    return false;
-  }
-  if (original_size > UINT32_MAX) {
-    *error_msg = StringPrintf("Image too large: %zu" , original_size);
-    return false;
-  }
-
-  const ImageHeader& relocated_header =
-      *reinterpret_cast<const ImageHeader*>(relocated.Begin());
-  // Offsets are supposed to differ between original and relocated by this value
-  off_t expected_diff = relocated_header.GetPatchDelta();
-  if (expected_diff == 0) {
-    // Can't identify offsets which are supposed to differ due to relocation
-    *error_msg = "Relocation delta is 0";
-    return false;
-  }
-
-  const ImageHeader* image_header = reinterpret_cast<const ImageHeader*>(original.Begin());
-  if (image_header->GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
-    *error_msg = "Unexpected compressed image.";
-    return false;
-  }
-  if (image_header->IsAppImage()) {
-    *error_msg = "Unexpected app image.";
-    return false;
-  }
-  if (image_header->GetPointerSize() != PointerSize::k32 &&
-      image_header->GetPointerSize() != PointerSize::k64) {
-    *error_msg = "Unexpected pointer size.";
-    return false;
-  }
-  static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
-                "Expecting heap GC roots and references to have the same size.");
-  DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(image_header->GetPointerSize()));
-
-  const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
-                                             kPageSize);
-  const size_t end_of_bitmap = image_bitmap_offset + image_header->GetImageBitmapSection().Size();
-  const ImageSection& relocation_section = image_header->GetImageRelocationsSection();
-  MemoryRegion relocations_data(original.Begin() + end_of_bitmap, relocation_section.Size());
-  size_t image_end = image_header->GetClassTableSection().End();
-  if (!IsAligned<sizeof(GcRoot<mirror::Object>)>(image_end)) {
-    *error_msg = StringPrintf("Unaligned image end: %zu", image_end);
-    return false;
-  }
-  size_t num_indexes = image_end / sizeof(GcRoot<mirror::Object>);
-  if (relocation_section.Size() != BitsToBytesRoundUp(num_indexes)) {
-    *error_msg = StringPrintf("Unexpected size of relocation section: %zu expected: %zu",
-                              static_cast<size_t>(relocation_section.Size()),
-                              BitsToBytesRoundUp(num_indexes));
-    return false;
-  }
-  BitMemoryRegion relocation_bitmap(relocations_data, /* bit_offset */ 0u, num_indexes);
-
-  // Output the SHA-256 digest of the original
-  output->resize(SHA256_DIGEST_LENGTH);
-  const uint8_t* original_bytes = original.Begin();
-  SHA256(original_bytes, original_size, output->data());
-
-  // Check the list of offsets at which the original and patched images differ.
-  size_t diff_offset_count = 0;
-  const uint8_t* relocated_bytes = relocated.Begin();
-  for (size_t index = 0; index != num_indexes; ++index) {
-    size_t offset = index * sizeof(GcRoot<mirror::Object>);
-    uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
-    uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
-    off_t diff = relocated_value - original_value;
-    if (diff == 0) {
-      CHECK(!relocation_bitmap.LoadBit(index));
-      continue;
-    } else if (diff != expected_diff) {
-      *error_msg =
-          StringPrintf(
-              "Unexpected diff at offset %zu. Expected: %jd, but was: %jd",
-              offset,
-              (intmax_t) expected_diff,
-              (intmax_t) diff);
-      return false;
-    }
-    CHECK(relocation_bitmap.LoadBit(index));
-    diff_offset_count++;
-  }
-  size_t tail_bytes = original_size - image_end;
-  CHECK_EQ(memcmp(original_bytes + image_end, relocated_bytes + image_end, tail_bytes), 0);
-
-  if (diff_offset_count == 0) {
-    *error_msg = "Original and patched images are identical";
-    return false;
-  }
-
-  return true;
-}
-
-static bool WriteRelFile(
-    const MemMap& original,
-    const MemMap& relocated,
-    const std::string& rel_filename,
-    std::string* error_msg) {
-  std::vector<uint8_t> output;
-  if (!PatchOat::GeneratePatch(original, relocated, &output, error_msg)) {
-    return false;
-  }
-
-  std::unique_ptr<File> rel_file(OS::CreateEmptyFileWriteOnly(rel_filename.c_str()));
-  if (rel_file.get() == nullptr) {
-    *error_msg = StringPrintf("Failed to create/open output file %s", rel_filename.c_str());
-    return false;
-  }
-  if (!rel_file->WriteFully(output.data(), output.size())) {
-    *error_msg = StringPrintf("Failed to write to %s", rel_filename.c_str());
-    return false;
-  }
-  if (rel_file->FlushCloseOrErase() != 0) {
-    *error_msg = StringPrintf("Failed to flush and close %s", rel_filename.c_str());
-    return false;
-  }
-
-  return true;
-}
-
-static bool CheckImageIdenticalToOriginalExceptForRelocation(
-    const std::string& relocated_filename,
-    const std::string& original_filename,
-    std::string* error_msg) {
-  *error_msg = "";
-  std::string rel_filename = original_filename + ".rel";
-  std::unique_ptr<File> rel_file(OS::OpenFileForReading(rel_filename.c_str()));
-  if (rel_file.get() == nullptr) {
-    *error_msg = StringPrintf("Failed to open image relocation file %s", rel_filename.c_str());
-    return false;
-  }
-  int64_t rel_size = rel_file->GetLength();
-  if (rel_size < 0) {
-    *error_msg = StringPrintf("Error while getting size of image relocation file %s",
-                              rel_filename.c_str());
-    return false;
-  }
-  if (rel_size != SHA256_DIGEST_LENGTH) {
-    *error_msg = StringPrintf("Unexpected size of image relocation file %s: %" PRId64
-                                  ", expected %zu",
-                              rel_filename.c_str(),
-                              rel_size,
-                              static_cast<size_t>(SHA256_DIGEST_LENGTH));
-    return false;
-  }
-  std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
-  if (!rel_file->ReadFully(rel.get(), rel_size)) {
-    *error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
-    return false;
-  }
-
-  std::unique_ptr<File> image_file(OS::OpenFileForReading(relocated_filename.c_str()));
-  if (image_file.get() == nullptr) {
-    *error_msg = StringPrintf("Unable to open relocated image file  %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-
-  int64_t image_size = image_file->GetLength();
-  if (image_size < 0) {
-    *error_msg = StringPrintf("Error while getting size of relocated image file %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-  if (static_cast<uint64_t>(image_size) < sizeof(ImageHeader)) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image file %s too small: %" PRId64,
-                relocated_filename.c_str(), image_size);
-    return false;
-  }
-  if (image_size > std::numeric_limits<uint32_t>::max()) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image file %s too large: %" PRId64, relocated_filename.c_str(), image_size);
-    return false;
-  }
-
-  std::unique_ptr<uint8_t[]> image(new uint8_t[image_size]);
-  if (!image_file->ReadFully(image.get(), image_size)) {
-    *error_msg = StringPrintf("Failed to read relocated image file %s", relocated_filename.c_str());
-    return false;
-  }
-
-  const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
-  if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
-    *error_msg = StringPrintf("Unsuported compressed image file %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-  size_t image_end = image_header.GetClassTableSection().End();
-  if (image_end > static_cast<uint64_t>(image_size) || !IsAligned<4u>(image_end)) {
-    *error_msg = StringPrintf("Heap size too big or unaligned in image file %s: %zu",
-                              relocated_filename.c_str(),
-                              image_end);
-    return false;
-  }
-  size_t number_of_relocation_locations = image_end / 4u;
-  const ImageSection& relocation_section = image_header.GetImageRelocationsSection();
-  if (relocation_section.Size() != BitsToBytesRoundUp(number_of_relocation_locations)) {
-    *error_msg = StringPrintf("Unexpected size of relocation section in image file %s: %zu"
-                                  " expected: %zu",
-                              relocated_filename.c_str(),
-                              static_cast<size_t>(relocation_section.Size()),
-                              BitsToBytesRoundUp(number_of_relocation_locations));
-    return false;
-  }
-  if (relocation_section.End() != image_size) {
-    *error_msg = StringPrintf("Relocation section does not end at file end in image file %s: %zu"
-                                  " expected: %" PRId64,
-                              relocated_filename.c_str(),
-                              static_cast<size_t>(relocation_section.End()),
-                              image_size);
-    return false;
-  }
-
-  off_t expected_diff = image_header.GetPatchDelta();
-  if (expected_diff == 0) {
-    *error_msg = StringPrintf("Unsuported patch delta of zero in %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-
-  // Relocated image is expected to differ from the original due to relocation.
-  // Unrelocate the image in memory to compensate.
-  MemoryRegion relocations(image.get() + relocation_section.Offset(), relocation_section.Size());
-  BitMemoryRegion relocation_bitmask(relocations,
-                                     /* bit_offset */ 0u,
-                                     number_of_relocation_locations);
-  for (size_t index = 0; index != number_of_relocation_locations; ++index) {
-    if (relocation_bitmask.LoadBit(index)) {
-      uint32_t* image_value = reinterpret_cast<uint32_t*>(image.get() + index * 4u);
-      *image_value -= expected_diff;
-    }
-  }
-
-  // Image in memory is now supposed to be identical to the original.  We
-  // confirm this by comparing the digest of the in-memory image to the expected
-  // digest from relocation file.
-  uint8_t image_digest[SHA256_DIGEST_LENGTH];
-  SHA256(image.get(), image_size, image_digest);
-  if (memcmp(image_digest, rel.get(), SHA256_DIGEST_LENGTH) != 0) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image %s does not match the original %s after unrelocation",
-            relocated_filename.c_str(),
-            original_filename.c_str());
-    return false;
-  }
-
-  // Relocated image is identical to the original, once relocations are taken into account
-  return true;
-}
-
-static bool VerifySymlink(const std::string& intended_target, const std::string& link_name) {
-  std::string actual_target;
-  if (!android::base::Readlink(link_name, &actual_target)) {
-    PLOG(ERROR) << "Readlink on " << link_name << " failed.";
-    return false;
-  }
-  return actual_target == intended_target;
-}
-
-static bool VerifyVdexAndOatSymlinks(const std::string& input_image_filename,
-                                     const std::string& output_image_filename) {
-  return VerifySymlink(ImageHeader::GetVdexLocationFromImageLocation(input_image_filename),
-                       ImageHeader::GetVdexLocationFromImageLocation(output_image_filename))
-      && VerifySymlink(ImageHeader::GetOatLocationFromImageLocation(input_image_filename),
-                       ImageHeader::GetOatLocationFromImageLocation(output_image_filename));
-}
-
-bool PatchOat::CreateVdexAndOatSymlinks(const std::string& input_image_filename,
-                                        const std::string& output_image_filename) {
-  std::string input_vdex_filename =
-      ImageHeader::GetVdexLocationFromImageLocation(input_image_filename);
-  std::string input_oat_filename =
-      ImageHeader::GetOatLocationFromImageLocation(input_image_filename);
-
-  std::unique_ptr<File> input_oat_file(OS::OpenFileForReading(input_oat_filename.c_str()));
-  if (input_oat_file.get() == nullptr) {
-    LOG(ERROR) << "Unable to open input oat file at " << input_oat_filename;
-    return false;
-  }
-  std::string error_msg;
-  std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat_file.get(),
-                                             PROT_READ | PROT_WRITE,
-                                             MAP_PRIVATE,
-                                             &error_msg));
-  if (elf == nullptr) {
-    LOG(ERROR) << "Unable to open oat file " << input_oat_filename << " : " << error_msg;
-    return false;
-  }
-
-  const OatHeader* oat_header = GetOatHeader(elf.get());
-  if (oat_header == nullptr) {
-    LOG(ERROR) << "Failed to find oat header in oat file " << input_oat_filename;
-    return false;
-  }
-
-  if (!oat_header->IsValid()) {
-    LOG(ERROR) << "Elf file " << input_oat_filename << " has an invalid oat header";
-    return false;
-  }
-
-  std::string output_vdex_filename =
-      ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
-  std::string output_oat_filename =
-      ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
-
-  return SymlinkFile(input_oat_filename, output_oat_filename) &&
-         SymlinkFile(input_vdex_filename, output_vdex_filename);
-}
-
-bool PatchOat::Patch(const std::string& image_location,
-                     off_t delta,
-                     const std::string& output_image_directory,
-                     const std::string& output_image_relocation_directory,
-                     InstructionSet isa,
-                     TimingLogger* timings) {
-  bool output_image = !output_image_directory.empty();
-  bool output_image_relocation = !output_image_relocation_directory.empty();
-  if ((!output_image) && (!output_image_relocation)) {
-    // Nothing to do
-    return true;
-  }
-  if ((output_image_relocation) && (delta == 0)) {
-    LOG(ERROR) << "Cannot output image relocation information when requested relocation delta is 0";
-    return false;
-  }
-
-  CHECK(Runtime::Current() == nullptr);
-  CHECK(!image_location.empty()) << "image file must have a filename.";
-
-  TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
-  CHECK_NE(isa, InstructionSet::kNone);
-
-  // Set up the runtime
-  PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
-  if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
-    LOG(ERROR) << "Unable to initialize runtime";
-    return false;
-  }
-  std::unique_ptr<Runtime> runtime(Runtime::Current());
-
-  // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
-  // give it away now and then switch to a more manageable ScopedObjectAccess.
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-  ScopedObjectAccess soa(Thread::Current());
-
-  std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-  std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
-
-  for (size_t i = 0; i < spaces.size(); ++i) {
-    t.NewTiming("Image Patching setup");
-    gc::space::ImageSpace* space = spaces[i];
-    std::string input_image_filename = space->GetImageFilename();
-    std::unique_ptr<File> input_image(OS::OpenFileForReading(input_image_filename.c_str()));
-    if (input_image.get() == nullptr) {
-      LOG(ERROR) << "Unable to open input image file at " << input_image_filename;
-      return false;
-    }
-
-    int64_t image_len = input_image->GetLength();
-    if (image_len < 0) {
-      LOG(ERROR) << "Error while getting image length";
-      return false;
-    }
-    ImageHeader image_header;
-    if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
-                                                  sizeof(image_header), 0)) {
-      LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
-    }
-
-    /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
-    // Nothing special to do right now since the image always needs to get patched.
-    // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
-
-    // Create the map where we will write the image patches to.
-    std::string error_msg;
-    MemMap image = MemMap::MapFile(image_len,
-                                   PROT_READ | PROT_WRITE,
-                                   MAP_PRIVATE,
-                                   input_image->Fd(),
-                                   0,
-                                   /*low_4gb*/false,
-                                   input_image->GetPath().c_str(),
-                                   &error_msg);
-    if (!image.IsValid()) {
-      LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
-      return false;
-    }
-
-
-    space_to_memmap_map.emplace(space, std::move(image));
-    PatchOat p = PatchOat(isa,
-                          &space_to_memmap_map[space],
-                          space->GetLiveBitmap(),
-                          space->GetMemMap(),
-                          delta,
-                          &space_to_memmap_map,
-                          timings);
-
-    t.NewTiming("Patching image");
-    if (!p.PatchImage(i == 0)) {
-      LOG(ERROR) << "Failed to patch image file " << input_image_filename;
-      return false;
-    }
-
-    // Write the patched image spaces.
-    if (output_image) {
-      std::string output_image_filename;
-      if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
-                                  output_image_directory.c_str(),
-                                  &output_image_filename,
-                                  &error_msg)) {
-        LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
-        return false;
-      }
-
-      if (!CreateVdexAndOatSymlinks(input_image_filename, output_image_filename))
-        return false;
-
-      t.NewTiming("Writing image");
-      std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
-      if (output_image_file.get() == nullptr) {
-        LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
-        return false;
-      }
-
-      bool success = p.WriteImage(output_image_file.get());
-      success = FinishFile(output_image_file.get(), success);
-      if (!success) {
-        return false;
-      }
-    }
-
-    if (output_image_relocation) {
-      t.NewTiming("Writing image relocation");
-      std::string original_image_filename(space->GetImageLocation() + ".rel");
-      std::string image_relocation_filename =
-          output_image_relocation_directory
-              + (android::base::StartsWith(original_image_filename, "/") ? "" : "/")
-              + original_image_filename.substr(original_image_filename.find_last_of("/"));
-      int64_t input_image_size = input_image->GetLength();
-      if (input_image_size < 0) {
-        LOG(ERROR) << "Error while getting input image size";
-        return false;
-      }
-      MemMap original = MemMap::MapFile(input_image_size,
-                                        PROT_READ,
-                                        MAP_PRIVATE,
-                                        input_image->Fd(),
-                                        0,
-                                        /*low_4gb*/false,
-                                        input_image->GetPath().c_str(),
-                                        &error_msg);
-      if (!original.IsValid()) {
-        LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
-        return false;
-      }
-
-      const MemMap* relocated = p.image_;
-
-      if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
-        LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
-            << ": " << error_msg;
-        return false;
-      }
-    }
-  }
-
-  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
-    // We want to just exit on non-debug builds, not bringing the runtime down
-    // in an orderly fashion. So release the following fields.
-    runtime.release();
-  }
-
-  return true;
-}
-
-bool PatchOat::Verify(const std::string& image_location,
-                      const std::string& output_image_directory,
-                      InstructionSet isa,
-                      TimingLogger* timings) {
-  if (image_location.empty()) {
-    LOG(ERROR) << "Original image file not provided";
-    return false;
-  }
-  if (output_image_directory.empty()) {
-    LOG(ERROR) << "Relocated image directory not provided";
-    return false;
-  }
-
-  TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
-  CHECK_NE(isa, InstructionSet::kNone);
-
-  // Set up the runtime
-  PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
-  if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
-    LOG(ERROR) << "Unable to initialize runtime";
-    return false;
-  }
-  std::unique_ptr<Runtime> runtime(Runtime::Current());
-
-  // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
-  // give it away now and then switch to a more manageable ScopedObjectAccess.
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-  ScopedObjectAccess soa(Thread::Current());
-
-  t.NewTiming("Image Verification setup");
-  std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-
-  // TODO: Check that no other .rel files exist in the original dir
-
-  bool success = true;
-  std::string image_location_dir = android::base::Dirname(image_location);
-  for (size_t i = 0; i < spaces.size(); ++i) {
-    gc::space::ImageSpace* space = spaces[i];
-
-    std::string relocated_image_filename;
-    std::string error_msg;
-    if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
-            output_image_directory.c_str(), &relocated_image_filename, &error_msg)) {
-      LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
-      success = false;
-      break;
-    }
-    // location:     /system/framework/boot.art
-    // isa:          arm64
-    // basename:     boot.art
-    // original:     /system/framework/arm64/boot.art
-    // relocation:   /system/framework/arm64/boot.art.rel
-    std::string original_image_filename =
-        GetSystemImageFilename(space->GetImageLocation().c_str(), isa);
-
-    if (!CheckImageIdenticalToOriginalExceptForRelocation(
-            relocated_image_filename, original_image_filename, &error_msg)) {
-      LOG(ERROR) << error_msg;
-      success = false;
-      break;
-    }
-
-    if (!VerifyVdexAndOatSymlinks(original_image_filename, relocated_image_filename)) {
-      LOG(ERROR) << "Verification of vdex and oat symlinks for "
-                 << space->GetImageLocation() << " failed.";
-      success = false;
-      break;
-    }
-  }
-
-  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
-    // We want to just exit on non-debug builds, not bringing the runtime down
-    // in an orderly fashion. So release the following fields.
-    runtime.release();
-  }
-
-  return success;
-}
-
-bool PatchOat::WriteImage(File* out) {
-  CHECK(out != nullptr);
-  TimingLogger::ScopedTiming t("Writing image File", timings_);
-  std::string error_msg;
-
-  // No error checking here, this is best effort. The locking may or may not
-  // succeed and we don't really care either way.
-  ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
-                                            true /* read_only_mode */, &error_msg);
-
-  CHECK(image_ != nullptr);
-  size_t expect = image_->Size();
-  if (out->WriteFully(reinterpret_cast<char*>(image_->Begin()), expect) &&
-      out->SetLength(expect) == 0) {
-    return true;
-  } else {
-    LOG(ERROR) << "Writing to image file " << out->GetPath() << " failed.";
-    return false;
-  }
-}
-
-bool PatchOat::IsImagePic(const ImageHeader& image_header, const std::string& image_path) {
-  if (!image_header.CompilePic()) {
-    if (kIsDebugBuild) {
-      LOG(INFO) << "image at location " << image_path << " was *not* compiled pic";
-    }
-    return false;
-  }
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "image at location " << image_path << " was compiled PIC";
-  }
-
-  return true;
-}
-
-class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
- public:
-  explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
-    dest->SetDeclaringClass(
-        patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtFields(const ImageHeader* image_header) {
-  PatchOatArtFieldVisitor visitor(this);
-  image_header->VisitPackedArtFields(&visitor, heap_->Begin());
-}
-
-class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
- public:
-  explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
-    patch_oat_->FixupMethod(method, dest);
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  PatchOatArtMethodVisitor visitor(this);
-  image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
-}
-
-void PatchOat::PatchImTables(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  // We can safely walk target image since the conflict tables are independent.
-  image_header->VisitPackedImTables(
-      [this](ArtMethod* method) {
-        return RelocatedAddressOfPointer(method);
-      },
-      image_->Begin(),
-      pointer_size);
-}
-
-void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  // We can safely walk target image since the conflict tables are independent.
-  image_header->VisitPackedImtConflictTables(
-      [this](ArtMethod* method) {
-        return RelocatedAddressOfPointer(method);
-      },
-      image_->Begin(),
-      pointer_size);
-}
-
-class PatchOat::FixupRootVisitor : public RootVisitor {
- public:
-  explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
-  }
-
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
-    }
-  }
-
-  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
-    }
-  }
-
- private:
-  const PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
-  const auto& section = image_header->GetInternedStringsSection();
-  if (section.Size() == 0) {
-    return;
-  }
-  InternTable temp_table;
-  // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-  // This also relies on visit roots not doing any verification which could fail after we update
-  // the roots to be the image addresses.
-  temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
-  FixupRootVisitor visitor(this);
-  temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
-}
-
-void PatchOat::PatchClassTable(const ImageHeader* image_header) {
-  const auto& section = image_header->GetClassTableSection();
-  if (section.Size() == 0) {
-    return;
-  }
-  // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-  // This also relies on visit roots not doing any verification which could fail after we update
-  // the roots to be the image addresses.
-  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  ClassTable temp_table;
-  temp_table.ReadFromMemory(image_->Begin() + section.Offset());
-  FixupRootVisitor visitor(this);
-  temp_table.VisitRoots(UnbufferedRootVisitor(&visitor, RootInfo(kRootUnknown)));
-}
-
-
-class PatchOat::RelocatedPointerVisitor {
- public:
-  explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  template <typename T>
-  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
-    return patch_oat_->RelocatedAddressOfPointer(ptr);
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
-  auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
-      img_roots->Get(ImageHeader::kDexCaches));
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
-    auto* orig_dex_cache = dex_caches->GetWithoutChecks(i);
-    auto* copy_dex_cache = RelocatedCopyOf(orig_dex_cache);
-    // Though the DexCache array fields are usually treated as native pointers, we set the full
-    // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
-    // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
-    //     static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))).
-    mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
-    mirror::StringDexCacheType* relocated_strings = RelocatedAddressOfPointer(orig_strings);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::StringsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_strings)));
-    if (orig_strings != nullptr) {
-      orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this));
-    }
-    mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
-    mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedTypesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types)));
-    if (orig_types != nullptr) {
-      orig_dex_cache->FixupResolvedTypes(RelocatedCopyOf(orig_types),
-                                         RelocatedPointerVisitor(this));
-    }
-    mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
-    mirror::MethodDexCacheType* relocated_methods = RelocatedAddressOfPointer(orig_methods);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedMethodsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_methods)));
-    if (orig_methods != nullptr) {
-      mirror::MethodDexCacheType* copy_methods = RelocatedCopyOf(orig_methods);
-      for (size_t j = 0, num = orig_dex_cache->NumResolvedMethods(); j != num; ++j) {
-        mirror::MethodDexCachePair orig =
-            mirror::DexCache::GetNativePairPtrSize(orig_methods, j, pointer_size);
-        mirror::MethodDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
-        mirror::DexCache::SetNativePairPtrSize(copy_methods, j, copy, pointer_size);
-      }
-    }
-    mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
-    mirror::FieldDexCacheType* relocated_fields = RelocatedAddressOfPointer(orig_fields);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedFieldsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_fields)));
-    if (orig_fields != nullptr) {
-      mirror::FieldDexCacheType* copy_fields = RelocatedCopyOf(orig_fields);
-      for (size_t j = 0, num = orig_dex_cache->NumResolvedFields(); j != num; ++j) {
-        mirror::FieldDexCachePair orig =
-            mirror::DexCache::GetNativePairPtrSize(orig_fields, j, pointer_size);
-        mirror::FieldDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
-        mirror::DexCache::SetNativePairPtrSize(copy_fields, j, copy, pointer_size);
-      }
-    }
-    mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
-    mirror::MethodTypeDexCacheType* relocated_method_types =
-        RelocatedAddressOfPointer(orig_method_types);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedMethodTypesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_method_types)));
-    if (orig_method_types != nullptr) {
-      orig_dex_cache->FixupResolvedMethodTypes(RelocatedCopyOf(orig_method_types),
-                                               RelocatedPointerVisitor(this));
-    }
-
-    GcRoot<mirror::CallSite>* orig_call_sites = orig_dex_cache->GetResolvedCallSites();
-    GcRoot<mirror::CallSite>* relocated_call_sites = RelocatedAddressOfPointer(orig_call_sites);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedCallSitesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_call_sites)));
-    if (orig_call_sites != nullptr) {
-      orig_dex_cache->FixupResolvedCallSites(RelocatedCopyOf(orig_call_sites),
-                                             RelocatedPointerVisitor(this));
-    }
-  }
-}
-
-bool PatchOat::PatchImage(bool primary_image) {
-  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
-  CHECK_GT(image_->Size(), sizeof(ImageHeader));
-  // These are the roots from the original file.
-  mirror::ObjectArray<mirror::Object>* img_roots = image_header->GetImageRoots().Ptr();
-  image_header->RelocateImage(delta_);
-
-  PatchArtFields(image_header);
-  PatchArtMethods(image_header);
-  PatchImTables(image_header);
-  PatchImtConflictTables(image_header);
-  PatchInternedStrings(image_header);
-  PatchClassTable(image_header);
-  // Patch dex file int/long arrays which point to ArtFields.
-  PatchDexFileArrays(img_roots);
-
-  if (primary_image) {
-    VisitObject(img_roots);
-  }
-
-  if (!image_header->IsValid()) {
-    LOG(ERROR) << "relocation renders image header invalid";
-    return false;
-  }
-
-  {
-    TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
-    // Walk the bitmap.
-    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-      VisitObject(obj);
-    };
-    bitmap_->Walk(visitor);
-  }
-  return true;
-}
-
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
-                                         MemberOffset off,
-                                         bool is_static_unused ATTRIBUTE_UNUSED) const {
-  mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
-  mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
-  copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
-                                         ObjPtr<mirror::Reference> ref) const {
-  MemberOffset off = mirror::Reference::ReferentOffset();
-  mirror::Object* referent = ref->GetReferent();
-  DCHECK(referent == nullptr ||
-         Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(referent)) << referent;
-  mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
-  copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-// Called by PatchImage.
-void PatchOat::VisitObject(mirror::Object* object) {
-  mirror::Object* copy = RelocatedCopyOf(object);
-  CHECK(copy != nullptr);
-  if (kUseBakerReadBarrier) {
-    object->AssertReadBarrierState();
-  }
-  PatchOat::PatchVisitor visitor(this, copy);
-  object->VisitReferences<kVerifyNone>(visitor, visitor);
-  if (object->IsClass<kVerifyNone>()) {
-    const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-    mirror::Class* klass = object->AsClass();
-    mirror::Class* copy_klass = down_cast<mirror::Class*>(copy);
-    RelocatedPointerVisitor native_visitor(this);
-    klass->FixupNativePointers(copy_klass, pointer_size, native_visitor);
-    auto* vtable = klass->GetVTable();
-    if (vtable != nullptr) {
-      vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
-    }
-    mirror::IfTable* iftable = klass->GetIfTable();
-    for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
-      if (iftable->GetMethodArrayCount(i) > 0) {
-        auto* method_array = iftable->GetMethodArray(i);
-        CHECK(method_array != nullptr);
-        method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
-                            pointer_size,
-                            native_visitor);
-      }
-    }
-  } else if (object->GetClass() == GetClassRoot<mirror::Method>() ||
-             object->GetClass() == GetClassRoot<mirror::Constructor>()) {
-    // Need to go update the ArtMethod.
-    auto* dest = down_cast<mirror::Executable*>(copy);
-    auto* src = down_cast<mirror::Executable*>(object);
-    dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod()));
-  }
-}
-
-void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  copy->CopyFrom(object, pointer_size);
-  // Just update the entry points if it looks like we should.
-  // TODO: sanity check all the pointers' values
-  copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass().Ptr()));
-  copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
-      object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
-  // No special handling for IMT conflict table since all pointers are moved by the same offset.
-  copy->SetDataPtrSize(RelocatedAddressOfPointer(
-      object->GetDataPtrSize(pointer_size)), pointer_size);
-}
-
-static int orig_argc;
-static char** orig_argv;
-
-static std::string CommandLine() {
-  std::vector<std::string> command;
-  for (int i = 0; i < orig_argc; ++i) {
-    command.push_back(orig_argv[i]);
-  }
-  return android::base::Join(command, ' ');
-}
-
-static void UsageErrorV(const char* fmt, va_list ap) {
-  std::string error;
-  android::base::StringAppendV(&error, fmt, ap);
-  LOG(ERROR) << error;
-}
-
-static void UsageError(const char* fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  UsageErrorV(fmt, ap);
-  va_end(ap);
-}
-
-NO_RETURN static void Usage(const char *fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  UsageErrorV(fmt, ap);
-  va_end(ap);
-
-  UsageError("Command: %s", CommandLine().c_str());
-  UsageError("Usage: patchoat [options]...");
-  UsageError("");
-  UsageError("  --instruction-set=<isa>: Specifies the instruction set the patched code is");
-  UsageError("      compiled for (required).");
-  UsageError("");
-  UsageError("  --input-image-location=<file.art>: Specifies the 'location' of the image file to");
-  UsageError("      be patched.");
-  UsageError("");
-  UsageError("  --output-image-directory=<dir>: Specifies the directory to write the patched");
-  UsageError("      image file(s) to.");
-  UsageError("");
-  UsageError("  --output-image-relocation-directory=<dir>: Specifies the directory to write");
-  UsageError("      the image relocation information to.");
-  UsageError("");
-  UsageError("  --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
-  UsageError("      This value may be negative.");
-  UsageError("");
-  UsageError("  --verify: Verify an existing patched file instead of creating one.");
-  UsageError("");
-  UsageError("  --dump-timings: dump out patch timing information");
-  UsageError("");
-  UsageError("  --no-dump-timings: do not dump out patch timing information");
-  UsageError("");
-
-  exit(EXIT_FAILURE);
-}
-
-static int patchoat_patch_image(TimingLogger& timings,
-                                InstructionSet isa,
-                                const std::string& input_image_location,
-                                const std::string& output_image_directory,
-                                const std::string& output_image_relocation_directory,
-                                off_t base_delta,
-                                bool base_delta_set,
-                                bool debug) {
-  CHECK(!input_image_location.empty());
-  if ((output_image_directory.empty()) && (output_image_relocation_directory.empty())) {
-    Usage("Image patching requires --output-image-directory or --output-image-relocation-directory");
-  }
-
-  if (!base_delta_set) {
-    Usage("Must supply a desired new offset or delta.");
-  }
-
-  if (!IsAligned<kPageSize>(base_delta)) {
-    Usage("Base offset/delta must be aligned to a pagesize (0x%08x) boundary.", kPageSize);
-  }
-
-  if (debug) {
-    LOG(INFO) << "moving offset by " << base_delta
-        << " (0x" << std::hex << base_delta << ") bytes or "
-        << std::dec << (base_delta/kPageSize) << " pages.";
-  }
-
-  TimingLogger::ScopedTiming pt("patch image and oat", &timings);
-
-  bool ret =
-      PatchOat::Patch(
-          input_image_location,
-          base_delta,
-          output_image_directory,
-          output_image_relocation_directory,
-          isa,
-          &timings);
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Exiting with return ... " << ret;
-  }
-  return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat_verify_image(TimingLogger& timings,
-                                 InstructionSet isa,
-                                 const std::string& input_image_location,
-                                 const std::string& output_image_directory) {
-  CHECK(!input_image_location.empty());
-  TimingLogger::ScopedTiming pt("verify image and oat", &timings);
-
-  bool ret =
-      PatchOat::Verify(
-          input_image_location,
-          output_image_directory,
-          isa,
-          &timings);
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Exiting with return ... " << ret;
-  }
-  return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat(int argc, char **argv) {
-  Locks::Init();
-  InitLogging(argv, Runtime::Abort);
-  MemMap::Init();
-  const bool debug = kIsDebugBuild;
-  orig_argc = argc;
-  orig_argv = argv;
-  TimingLogger timings("patcher", false, false);
-
-  // Skip over the command name.
-  argv++;
-  argc--;
-
-  if (argc == 0) {
-    Usage("No arguments specified");
-  }
-
-  timings.StartTiming("Patchoat");
-
-  // cmd line args
-  bool isa_set = false;
-  InstructionSet isa = InstructionSet::kNone;
-  std::string input_image_location;
-  std::string output_image_directory;
-  std::string output_image_relocation_directory;
-  off_t base_delta = 0;
-  bool base_delta_set = false;
-  bool dump_timings = kIsDebugBuild;
-  bool verify = false;
-
-  for (int i = 0; i < argc; ++i) {
-    const StringPiece option(argv[i]);
-    const bool log_options = false;
-    if (log_options) {
-      LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
-    }
-    if (option.starts_with("--instruction-set=")) {
-      isa_set = true;
-      const char* isa_str = option.substr(strlen("--instruction-set=")).data();
-      isa = GetInstructionSetFromString(isa_str);
-      if (isa == InstructionSet::kNone) {
-        Usage("Unknown or invalid instruction set %s", isa_str);
-      }
-    } else if (option.starts_with("--input-image-location=")) {
-      input_image_location = option.substr(strlen("--input-image-location=")).data();
-    } else if (option.starts_with("--output-image-directory=")) {
-      output_image_directory = option.substr(strlen("--output-image-directory=")).data();
-    } else if (option.starts_with("--output-image-relocation-directory=")) {
-      output_image_relocation_directory =
-          option.substr(strlen("--output-image-relocation-directory=")).data();
-    } else if (option.starts_with("--base-offset-delta=")) {
-      const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
-      base_delta_set = true;
-      if (!ParseInt(base_delta_str, &base_delta)) {
-        Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
-      }
-    } else if (option == "--dump-timings") {
-      dump_timings = true;
-    } else if (option == "--no-dump-timings") {
-      dump_timings = false;
-    } else if (option == "--verify") {
-      verify = true;
-    } else {
-      Usage("Unknown argument %s", option.data());
-    }
-  }
-
-  // The instruction set is mandatory. This simplifies things...
-  if (!isa_set) {
-    Usage("Instruction set must be set.");
-  }
-
-  int ret;
-  if (verify) {
-    ret = patchoat_verify_image(timings,
-                                isa,
-                                input_image_location,
-                                output_image_directory);
-  } else {
-    ret = patchoat_patch_image(timings,
-                               isa,
-                               input_image_location,
-                               output_image_directory,
-                               output_image_relocation_directory,
-                               base_delta,
-                               base_delta_set,
-                               debug);
-  }
-
-  timings.EndTiming();
-  if (dump_timings) {
-    LOG(INFO) << Dumpable<TimingLogger>(timings);
-  }
-
-  return ret;
-}
-
-}  // namespace art
-
-int main(int argc, char **argv) {
-  return art::patchoat(argc, argv);
-}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
deleted file mode 100644
index 237ef50..0000000
--- a/patchoat/patchoat.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_PATCHOAT_PATCHOAT_H_
-#define ART_PATCHOAT_PATCHOAT_H_
-
-#include "arch/instruction_set.h"
-#include "base/enums.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "base/os.h"
-#include "elf_file.h"
-#include "elf_utils.h"
-#include "gc/accounting/space_bitmap.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "runtime.h"
-
-namespace art {
-
-class ArtMethod;
-class ImageHeader;
-class OatHeader;
-
-namespace mirror {
-class Object;
-class PointerArray;
-class Reference;
-class Class;
-}  // namespace mirror
-
-class PatchOat {
- public:
-  // Relocates the provided image by the specified offset. If output_image_directory is non-empty,
-  // outputs the relocated image into that directory. If output_image_relocation_directory is
-  // non-empty, outputs image relocation files (see GeneratePatch) into that directory.
-  static bool Patch(const std::string& image_location,
-                    off_t delta,
-                    const std::string& output_image_directory,
-                    const std::string& output_image_relocation_directory,
-                    InstructionSet isa,
-                    TimingLogger* timings);
-  static bool Verify(const std::string& image_location,
-                     const std::string& output_image_filename,
-                     InstructionSet isa,
-                     TimingLogger* timings);
-
-  // Generates a patch which can be used to efficiently relocate the original file or to check that
-  // a relocated file matches the original. The patch is generated from the difference of the
-  // |original| and the already |relocated| image, and written to |output| in the form of unsigned
-  // LEB128 for each relocation position.
-  static bool GeneratePatch(const MemMap& original,
-                            const MemMap& relocated,
-                            std::vector<uint8_t>* output,
-                            std::string* error_msg);
-
-  ~PatchOat() {}
-  PatchOat(PatchOat&&) = default;
-
- private:
-  // All pointers are only borrowed.
-  PatchOat(InstructionSet isa, MemMap* image,
-           gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
-           std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
-      : image_(image), bitmap_(bitmap), heap_(heap),
-        delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
-
-  // Was the .art image at image_path made with --compile-pic ?
-  static bool IsImagePic(const ImageHeader& image_header, const std::string& image_path);
-
-  static bool CreateVdexAndOatSymlinks(const std::string& input_image_filename,
-                                       const std::string& output_image_filename);
-
-
-  void VisitObject(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void FixupMethod(ArtMethod* object, ArtMethod* copy)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool PatchImage(bool primary_image) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchArtFields(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchArtMethods(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchImTables(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchImtConflictTables(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchInternedStrings(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchClassTable(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool WriteImage(File* out);
-
-  template <typename T>
-  T* RelocatedCopyOf(T* obj) const {
-    if (obj == nullptr) {
-      return nullptr;
-    }
-    DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
-    DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
-    uintptr_t heap_off =
-        reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
-    DCHECK_LT(heap_off, image_->Size());
-    return reinterpret_cast<T*>(image_->Begin() + heap_off);
-  }
-
-  template <typename T>
-  T* RelocatedCopyOfFollowImages(T* obj) const {
-    if (obj == nullptr) {
-      return nullptr;
-    }
-    // Find ImageSpace this belongs to.
-    auto image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-    for (gc::space::ImageSpace* image_space : image_spaces) {
-      if (image_space->Contains(obj)) {
-        uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
-                             reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
-        return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
-      }
-    }
-    LOG(FATAL) << "Did not find object in boot image space " << obj;
-    UNREACHABLE();
-  }
-
-  template <typename T>
-  T* RelocatedAddressOfPointer(T* obj) const {
-    if (obj == nullptr) {
-      return obj;
-    }
-    auto ret = reinterpret_cast<uintptr_t>(obj) + delta_;
-    // Trim off high bits in case negative relocation with 64 bit patchoat.
-    if (Is32BitISA()) {
-      ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
-    }
-    return reinterpret_cast<T*>(ret);
-  }
-
-  bool Is32BitISA() const {
-    return InstructionSetPointerSize(isa_) == PointerSize::k32;
-  }
-
-  // Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
-  // change the heap.
-  class PatchVisitor {
-   public:
-    PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
-    ~PatchVisitor() {}
-    void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-    // For reference classes.
-    void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference>  ref) const
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-    // TODO: Consider using these for updating native class roots?
-    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {}
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
-   private:
-    PatchOat* const patcher_;
-    mirror::Object* const copy_;
-  };
-
-  // A mmap of the image we are patching. This is modified.
-  const MemMap* const image_;
-  // The bitmap over the image within the heap we are patching. This is not modified.
-  gc::accounting::ContinuousSpaceBitmap* const bitmap_;
-  // The heap we are patching. This is not modified.
-  const MemMap* const heap_;
-  // The amount we are changing the offset by.
-  const off_t delta_;
-  // Active instruction set, used to know the entrypoint size.
-  const InstructionSet isa_;
-
-  const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
-
-  TimingLogger* timings_;
-
-  class FixupRootVisitor;
-  class RelocatedPointerVisitor;
-  class PatchOatArtFieldVisitor;
-  class PatchOatArtMethodVisitor;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
-};
-
-}  // namespace art
-#endif  // ART_PATCHOAT_PATCHOAT_H_
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
deleted file mode 100644
index 08bf31c..0000000
--- a/patchoat/patchoat_test.cc
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <openssl/sha.h>
-#include <dirent.h>
-#include <sys/types.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/hex_dump.h"
-#include "base/leb128.h"
-#include "dexopt_test.h"
-#include "runtime.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-using android::base::StringPrintf;
-
-class PatchoatTest : public DexoptTest {
- public:
-  static bool ListDirFilesEndingWith(
-      const std::string& dir,
-      const std::string& suffix,
-      std::vector<std::string>* filenames,
-      std::string* error_msg) {
-    DIR* d = opendir(dir.c_str());
-    if (d == nullptr) {
-      *error_msg = "Failed to open directory";
-      return false;
-    }
-    dirent* e;
-    struct stat s;
-    size_t suffix_len = suffix.size();
-    while ((e = readdir(d)) != nullptr) {
-      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
-        continue;
-      }
-      size_t name_len = strlen(e->d_name);
-      if ((name_len < suffix_len) || (strcmp(&e->d_name[name_len - suffix_len], suffix.c_str()))) {
-        continue;
-      }
-      std::string basename(e->d_name);
-      std::string filename = dir + "/" + basename;
-      int stat_result = lstat(filename.c_str(), &s);
-      if (stat_result != 0) {
-        *error_msg =
-            StringPrintf("Failed to stat %s: stat returned %d", filename.c_str(), stat_result);
-        return false;
-      }
-      if (S_ISDIR(s.st_mode)) {
-        continue;
-      }
-      filenames->push_back(basename);
-    }
-    closedir(d);
-    return true;
-  }
-
-  static void AddRuntimeArg(std::vector<std::string>& args, const std::string& arg) {
-    args.push_back("--runtime-arg");
-    args.push_back(arg);
-  }
-
-  bool CompileBootImage(const std::vector<std::string>& extra_args,
-                        const std::string& image_file_name_prefix,
-                        uint32_t base_addr,
-                        std::string* error_msg) {
-    Runtime* const runtime = Runtime::Current();
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetCompilerExecutable());
-    AddRuntimeArg(argv, "-Xms64m");
-    AddRuntimeArg(argv, "-Xmx64m");
-    std::vector<std::string> dex_files = GetLibCoreDexFileNames();
-    for (const std::string& dex_file : dex_files) {
-      argv.push_back("--dex-file=" + dex_file);
-      argv.push_back("--dex-location=" + dex_file);
-    }
-    if (runtime->IsJavaDebuggable()) {
-      argv.push_back("--debuggable");
-    }
-    runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
-    AddRuntimeArg(argv, "-Xverify:softfail");
-
-    if (!kIsTargetBuild) {
-      argv.push_back("--host");
-    }
-
-    argv.push_back("--image=" + image_file_name_prefix + ".art");
-    argv.push_back("--oat-file=" + image_file_name_prefix + ".oat");
-    argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
-    argv.push_back(StringPrintf("--base=0x%" PRIx32, base_addr));
-    argv.push_back("--compile-pic");
-    argv.push_back("--multi-image");
-    argv.push_back("--no-generate-debug-info");
-
-    std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-    argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
-    // We must set --android-root.
-    const char* android_root = getenv("ANDROID_ROOT");
-    CHECK(android_root != nullptr);
-    argv.push_back("--android-root=" + std::string(android_root));
-    argv.insert(argv.end(), extra_args.begin(), extra_args.end());
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  static std::vector<std::string> BasePatchoatCommand(const std::string& input_image_location,
-                                                      off_t base_offset_delta) {
-    Runtime* const runtime = Runtime::Current();
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetPatchoatExecutable());
-    argv.push_back("--input-image-location=" + input_image_location);
-    argv.push_back(StringPrintf("--base-offset-delta=0x%jx", (intmax_t) base_offset_delta));
-    argv.push_back(StringPrintf("--instruction-set=%s", GetInstructionSetString(kRuntimeISA)));
-
-    return argv;
-  }
-
-  bool RelocateBootImage(const std::string& input_image_location,
-                         const std::string& output_image_directory,
-                         off_t base_offset_delta,
-                         std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-directory=" + output_image_directory);
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool VerifyBootImage(const std::string& input_image_location,
-                       const std::string& output_image_directory,
-                       off_t base_offset_delta,
-                       std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-directory=" + output_image_directory);
-    argv.push_back("--verify");
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool GenerateBootImageRelFile(const std::string& input_image_location,
-                                const std::string& output_rel_directory,
-                                off_t base_offset_delta,
-                                std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-relocation-directory=" + output_rel_directory);
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool RunDex2OatOrPatchoat(const std::vector<std::string>& args, std::string* error_msg) {
-    int link[2];
-
-    if (pipe(link) == -1) {
-      return false;
-    }
-
-    pid_t pid = fork();
-    if (pid == -1) {
-      return false;
-    }
-
-    if (pid == 0) {
-      // We need dex2oat to actually log things.
-      setenv("ANDROID_LOG_TAGS", "*:e", 1);
-      dup2(link[1], STDERR_FILENO);
-      close(link[0]);
-      close(link[1]);
-      std::vector<const char*> c_args;
-      for (const std::string& str : args) {
-        c_args.push_back(str.c_str());
-      }
-      c_args.push_back(nullptr);
-      execv(c_args[0], const_cast<char* const*>(c_args.data()));
-      exit(1);
-      UNREACHABLE();
-    } else {
-      close(link[1]);
-      char buffer[128];
-      memset(buffer, 0, 128);
-      ssize_t bytes_read = 0;
-
-      while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
-        *error_msg += std::string(buffer, bytes_read);
-      }
-      close(link[0]);
-      int status = -1;
-      if (waitpid(pid, &status, 0) != -1) {
-        return (status == 0);
-      }
-      return false;
-    }
-  }
-
-  bool CompileBootImageToDir(
-      const std::string& output_dir,
-      const std::vector<std::string>& dex2oat_extra_args,
-      uint32_t base_addr,
-      std::string* error_msg) {
-    return CompileBootImage(dex2oat_extra_args, output_dir + "/boot", base_addr, error_msg);
-  }
-
-  bool CopyImageChecksumAndSetPatchDelta(
-      const std::string& src_image_filename,
-      const std::string& dest_image_filename,
-      off_t dest_patch_delta,
-      std::string* error_msg) {
-    std::unique_ptr<File> src_file(OS::OpenFileForReading(src_image_filename.c_str()));
-    if (src_file.get() == nullptr) {
-      *error_msg = StringPrintf("Failed to open source image file %s", src_image_filename.c_str());
-      return false;
-    }
-    ImageHeader src_header;
-    if (!src_file->ReadFully(&src_header, sizeof(src_header))) {
-      *error_msg = StringPrintf("Failed to read source image file %s", src_image_filename.c_str());
-      return false;
-    }
-
-    std::unique_ptr<File> dest_file(OS::OpenFileReadWrite(dest_image_filename.c_str()));
-    if (dest_file.get() == nullptr) {
-      *error_msg =
-          StringPrintf("Failed to open destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    ImageHeader dest_header;
-    if (!dest_file->ReadFully(&dest_header, sizeof(dest_header))) {
-      *error_msg =
-          StringPrintf("Failed to read destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    dest_header.SetOatChecksum(src_header.GetOatChecksum());
-    dest_header.SetPatchDelta(dest_patch_delta);
-    if (!dest_file->ResetOffset()) {
-      *error_msg =
-          StringPrintf(
-              "Failed to seek to start of destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    if (!dest_file->WriteFully(&dest_header, sizeof(dest_header))) {
-      *error_msg =
-          StringPrintf("Failed to write to destination image file %s", dest_image_filename.c_str());
-      dest_file->Erase();
-      return false;
-    }
-    if (dest_file->FlushCloseOrErase() != 0) {
-      *error_msg =
-          StringPrintf(
-              "Failed to flush/close destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-
-    return true;
-  }
-
-  bool ReadFully(
-      const std::string& filename, std::vector<uint8_t>* contents, std::string* error_msg) {
-    std::unique_ptr<File> file(OS::OpenFileForReading(filename.c_str()));
-    if (file.get() == nullptr) {
-      *error_msg = "Failed to open";
-      return false;
-    }
-    int64_t size = file->GetLength();
-    if (size < 0) {
-      *error_msg = "Failed to get size";
-      return false;
-    }
-    contents->resize(size);
-    if (!file->ReadFully(&(*contents)[0], size)) {
-      *error_msg = "Failed to read";
-      contents->clear();
-      return false;
-    }
-    return true;
-  }
-
-  bool BinaryDiff(
-      const std::string& filename1, const std::string& filename2, std::string* error_msg) {
-    std::string read_error_msg;
-    std::vector<uint8_t> image1;
-    if (!ReadFully(filename1, &image1, &read_error_msg)) {
-      *error_msg = StringPrintf("Failed to read %s: %s", filename1.c_str(), read_error_msg.c_str());
-      return true;
-    }
-    std::vector<uint8_t> image2;
-    if (!ReadFully(filename2, &image2, &read_error_msg)) {
-      *error_msg = StringPrintf("Failed to read %s: %s", filename2.c_str(), read_error_msg.c_str());
-      return true;
-    }
-    if (image1.size() != image1.size()) {
-      *error_msg =
-          StringPrintf(
-              "%s and %s are of different size: %zu vs %zu",
-              filename1.c_str(),
-              filename2.c_str(),
-              image1.size(),
-              image2.size());
-      return true;
-    }
-    size_t size = image1.size();
-    for (size_t i = 0; i < size; i++) {
-      if (image1[i] != image2[i]) {
-        *error_msg =
-            StringPrintf("%s and %s differ at offset %zu", filename1.c_str(), filename2.c_str(), i);
-        size_t hexdump_size = std::min<size_t>(16u, size - i);
-        HexDump dump1(&image1[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
-        HexDump dump2(&image2[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
-        std::ostringstream oss;
-        oss << "\n" << dump1 << "\n" << dump2;
-        *error_msg += oss.str();
-        return true;
-      }
-    }
-
-    return false;
-  }
-};
-
-TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
-#if defined(ART_USE_READ_BARRIER)
-  // This test checks that relocating a boot image using patchoat produces the same result as
-  // producing the boot image for that relocated base address using dex2oat. To be precise, these
-  // two files will have two small differences: the OAT checksum and base address. However, this
-  // test takes this into account.
-
-  // Compile boot image into a random directory using dex2oat
-  ScratchFile dex2oat_orig_scratch;
-  dex2oat_orig_scratch.Unlink();
-  std::string dex2oat_orig_dir = dex2oat_orig_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(dex2oat_orig_dir.c_str(), 0700));
-  const uint32_t orig_base_addr = 0x60000000;
-  // Force deterministic output. We want the boot images created by this dex2oat run and the run
-  // below to differ only in their base address.
-  std::vector<std::string> dex2oat_extra_args;
-  dex2oat_extra_args.push_back("--force-determinism");
-  dex2oat_extra_args.push_back("-j1");  // Might not be needed. Causes a 3-5x slowdown.
-  std::string error_msg;
-  if (!CompileBootImageToDir(dex2oat_orig_dir, dex2oat_extra_args, orig_base_addr, &error_msg)) {
-    FAIL() << "CompileBootImage1 failed: " << error_msg;
-  }
-
-  // Compile a "relocated" boot image into a random directory using dex2oat. This image is relocated
-  // in the sense that it uses a different base address.
-  ScratchFile dex2oat_reloc_scratch;
-  dex2oat_reloc_scratch.Unlink();
-  std::string dex2oat_reloc_dir = dex2oat_reloc_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(dex2oat_reloc_dir.c_str(), 0700));
-  const uint32_t reloc_base_addr = 0x70000000;
-  if (!CompileBootImageToDir(dex2oat_reloc_dir, dex2oat_extra_args, reloc_base_addr, &error_msg)) {
-    FAIL() << "CompileBootImage2 failed: " << error_msg;
-  }
-  const off_t base_addr_delta = reloc_base_addr - orig_base_addr;
-
-  // Relocate the original boot image using patchoat. The image is relocated by the same amount
-  // as the second/relocated image produced by dex2oat.
-  ScratchFile patchoat_scratch;
-  patchoat_scratch.Unlink();
-  std::string patchoat_dir = patchoat_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(patchoat_dir.c_str(), 0700));
-  std::string dex2oat_orig_with_arch_dir =
-      dex2oat_orig_dir + "/" + GetInstructionSetString(kRuntimeISA);
-  // The arch-including symlink is needed by patchoat
-  ASSERT_EQ(0, symlink(dex2oat_orig_dir.c_str(), dex2oat_orig_with_arch_dir.c_str()));
-  if (!RelocateBootImage(
-      dex2oat_orig_dir + "/boot.art",
-      patchoat_dir,
-      base_addr_delta,
-      &error_msg)) {
-    FAIL() << "RelocateBootImage failed: " << error_msg;
-  }
-
-  // Assert that patchoat created the same set of .art files as dex2oat
-  std::vector<std::string> dex2oat_image_basenames;
-  std::vector<std::string> patchoat_image_basenames;
-  if (!ListDirFilesEndingWith(dex2oat_reloc_dir, ".art", &dex2oat_image_basenames, &error_msg)) {
-    FAIL() << "Failed to list *.art files in " << dex2oat_reloc_dir << ": " << error_msg;
-  }
-  if (!ListDirFilesEndingWith(patchoat_dir, ".art", &patchoat_image_basenames, &error_msg)) {
-    FAIL() << "Failed to list *.art files in " << patchoat_dir << ": " << error_msg;
-  }
-  std::sort(dex2oat_image_basenames.begin(), dex2oat_image_basenames.end());
-  std::sort(patchoat_image_basenames.begin(), patchoat_image_basenames.end());
-  // .art file names output by patchoat look like tmp@art-data-<random>-<random>@boot*.art. To
-  // compare these with .art file names output by dex2oat we retain only the part of the file name
-  // after the last @.
-  std::vector<std::string> patchoat_image_shortened_basenames(patchoat_image_basenames.size());
-  for (size_t i = 0; i < patchoat_image_basenames.size(); i++) {
-    patchoat_image_shortened_basenames[i] =
-        patchoat_image_basenames[i].substr(patchoat_image_basenames[i].find_last_of("@") + 1);
-  }
-  ASSERT_EQ(dex2oat_image_basenames, patchoat_image_shortened_basenames);
-
-  // Patch up the dex2oat-relocated image files so that it looks as though they were relocated by
-  // patchoat. patchoat preserves the OAT checksum header field and sets patch delta header field.
-  for (const std::string& image_basename : dex2oat_image_basenames) {
-    if (!CopyImageChecksumAndSetPatchDelta(
-        dex2oat_orig_dir + "/" + image_basename,
-        dex2oat_reloc_dir + "/" + image_basename,
-        base_addr_delta,
-        &error_msg)) {
-      FAIL() << "Unable to patch up " << image_basename << ": " << error_msg;
-    }
-  }
-
-  // Assert that the patchoat-relocated images are identical to the dex2oat-relocated images
-  for (size_t i = 0; i < dex2oat_image_basenames.size(); i++) {
-    const std::string& dex2oat_image_basename = dex2oat_image_basenames[i];
-    const std::string& dex2oat_image_filename = dex2oat_reloc_dir + "/" + dex2oat_image_basename;
-    const std::string& patchoat_image_filename = patchoat_dir + "/" + patchoat_image_basenames[i];
-    if (BinaryDiff(dex2oat_image_filename, patchoat_image_filename, &error_msg)) {
-      FAIL() << "patchoat- and dex2oat-relocated variants of " << dex2oat_image_basename
-          << " differ: " << error_msg;
-    }
-  }
-
-  ClearDirectory(dex2oat_orig_dir.c_str(), /*recursive*/ true);
-  ClearDirectory(dex2oat_reloc_dir.c_str(), /*recursive*/ true);
-  ClearDirectory(patchoat_dir.c_str(), /*recursive*/ true);
-  rmdir(dex2oat_orig_dir.c_str());
-  rmdir(dex2oat_reloc_dir.c_str());
-  rmdir(patchoat_dir.c_str());
-#else
-  LOG(INFO) << "Skipping PatchoatRelocationSameAsDex2oatRelocation";
-  // Force-print to std::cout so it's also outside the logcat.
-  std::cout << "Skipping PatchoatRelocationSameAsDex2oatRelocation" << std::endl;
-#endif
-}
-
-// These tests check that a boot image relocated using patchoat can be unrelocated
-// using the .rel file created by patchoat.
-//
-// The tests don't work when heap poisoning is enabled because some of the
-// references are negated. b/72117833 is tracking the effort to have patchoat
-// and its tests support heap poisoning.
-class PatchoatVerificationTest : public PatchoatTest {
- protected:
-  void CreateRelocatedBootImage() {
-    // Compile boot image into a random directory using dex2oat
-    ScratchFile dex2oat_orig_scratch;
-    dex2oat_orig_scratch.Unlink();
-    dex2oat_orig_dir_ = dex2oat_orig_scratch.GetFilename();
-    ASSERT_EQ(0, mkdir(dex2oat_orig_dir_.c_str(), 0700));
-    const uint32_t orig_base_addr = 0x60000000;
-    std::vector<std::string> dex2oat_extra_args;
-    std::string error_msg;
-    if (!CompileBootImageToDir(dex2oat_orig_dir_, dex2oat_extra_args, orig_base_addr, &error_msg)) {
-      FAIL() << "CompileBootImage1 failed: " << error_msg;
-    }
-
-    // Generate image relocation file for the original boot image
-    std::string dex2oat_orig_with_arch_dir =
-        dex2oat_orig_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
-    // The arch-including symlink is needed by patchoat
-    ASSERT_EQ(0, symlink(dex2oat_orig_dir_.c_str(), dex2oat_orig_with_arch_dir.c_str()));
-    base_addr_delta_ = 0x100000;
-    if (!GenerateBootImageRelFile(
-        dex2oat_orig_dir_ + "/boot.art",
-        dex2oat_orig_dir_,
-        base_addr_delta_,
-        &error_msg)) {
-      FAIL() << "RelocateBootImage failed: " << error_msg;
-    }
-
-    // Relocate the original boot image using patchoat
-    ScratchFile relocated_scratch;
-    relocated_scratch.Unlink();
-    relocated_dir_ = relocated_scratch.GetFilename();
-    ASSERT_EQ(0, mkdir(relocated_dir_.c_str(), 0700));
-    // Use a different relocation delta from the one used when generating .rel files above. This is
-    // to make sure .rel files are not specific to a particular relocation delta.
-    base_addr_delta_ -= 0x10000;
-    if (!RelocateBootImage(
-        dex2oat_orig_dir_ + "/boot.art",
-        relocated_dir_,
-        base_addr_delta_,
-        &error_msg)) {
-      FAIL() << "RelocateBootImage failed: " << error_msg;
-    }
-
-    // Assert that patchoat created the same set of .art and .art.rel files
-    std::vector<std::string> rel_basenames;
-    std::vector<std::string> relocated_image_basenames;
-    if (!ListDirFilesEndingWith(dex2oat_orig_dir_, ".rel", &rel_basenames, &error_msg)) {
-      FAIL() << "Failed to list *.art.rel files in " << dex2oat_orig_dir_ << ": " << error_msg;
-    }
-    if (!ListDirFilesEndingWith(relocated_dir_, ".art", &relocated_image_basenames, &error_msg)) {
-      FAIL() << "Failed to list *.art files in " << relocated_dir_ << ": " << error_msg;
-    }
-    std::sort(rel_basenames.begin(), rel_basenames.end());
-    std::sort(relocated_image_basenames.begin(), relocated_image_basenames.end());
-
-    // .art and .art.rel file names output by patchoat look like
-    // tmp@art-data-<random>-<random>@boot*.art, encoding the name of the directory in their name.
-    // To compare these with each other, we retain only the part of the file name after the last @,
-    // and we also drop the extension.
-    std::vector<std::string> rel_shortened_basenames(rel_basenames.size());
-    std::vector<std::string> relocated_image_shortened_basenames(relocated_image_basenames.size());
-    for (size_t i = 0; i < rel_basenames.size(); i++) {
-      rel_shortened_basenames[i] = rel_basenames[i].substr(rel_basenames[i].find_last_of("@") + 1);
-      rel_shortened_basenames[i] =
-          rel_shortened_basenames[i].substr(0, rel_shortened_basenames[i].find("."));
-    }
-    for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
-      relocated_image_shortened_basenames[i] =
-          relocated_image_basenames[i].substr(relocated_image_basenames[i].find_last_of("@") + 1);
-      relocated_image_shortened_basenames[i] =
-          relocated_image_shortened_basenames[i].substr(
-              0, relocated_image_shortened_basenames[i].find("."));
-    }
-    ASSERT_EQ(rel_shortened_basenames, relocated_image_shortened_basenames);
-  }
-
-  virtual void TearDown() {
-    if (!dex2oat_orig_dir_.empty()) {
-      ClearDirectory(dex2oat_orig_dir_.c_str(), /*recursive*/ true);
-      rmdir(dex2oat_orig_dir_.c_str());
-    }
-    if (!relocated_dir_.empty()) {
-      ClearDirectory(relocated_dir_.c_str(), /*recursive*/ true);
-      rmdir(relocated_dir_.c_str());
-    }
-    PatchoatTest::TearDown();
-  }
-
-  std::string dex2oat_orig_dir_;
-  std::string relocated_dir_;
-  off_t base_addr_delta_;
-};
-
-// Assert that verification works with the .rel files.
-TEST_F(PatchoatVerificationTest, Sucessful) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  if (!VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage failed: " << error_msg;
-  }
-}
-
-// Corrupt the image file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedImage) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  std::string relocated_image_filename;
-  if (!GetDalvikCacheFilename((dex2oat_orig_dir_ + "/boot.art").c_str(),
-                               relocated_dir_.c_str(),
-                               &relocated_image_filename,
-                               &error_msg)) {
-    FAIL() << "Failed to find relocated image file name: " << error_msg;
-  }
-  ASSERT_EQ(truncate(relocated_image_filename.c_str(), sizeof(ImageHeader)), 0)
-    << relocated_image_filename;
-
-  if (VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage should have failed since the image was intentionally corrupted";
-  }
-}
-
-// Corrupt the relocation file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedRelFile) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  std::string art_filename = dex2oat_orig_dir_ + "/boot.art";
-  std::string rel_filename = dex2oat_orig_dir_ + "/boot.art.rel";
-  std::unique_ptr<File> art_file(OS::OpenFileForReading(art_filename.c_str()));
-  std::unique_ptr<File> rel_file(OS::OpenFileReadWrite(rel_filename.c_str()));
-  rel_file->ClearContent();
-  uint8_t buffer[64] = {};
-  ASSERT_TRUE(rel_file->WriteFully(&buffer, SHA256_DIGEST_LENGTH));
-  // Encode single relocation which is just past the end of the image file.
-  size_t leb_size = EncodeUnsignedLeb128(buffer, art_file->GetLength()) - buffer;
-  ASSERT_TRUE(rel_file->WriteFully(&buffer, leb_size));
-  ASSERT_EQ(rel_file->FlushClose(), 0);
-  ASSERT_EQ(art_file->Close(), 0);
-
-  if (VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage should have failed since the rel file was intentionally corrupted";
-  }
-}
-
-}  // namespace art
diff --git a/profman/Android.bp b/profman/Android.bp
index 5aaccb0..33cca02 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -26,6 +26,7 @@
 
     target: {
         android: {
+            // Use the 32-bit version of profman on devices.
             compile_multilib: "prefer32",
         },
     },
@@ -39,7 +40,6 @@
     name: "profman",
     defaults: ["profman-defaults"],
     shared_libs: [
-        "libart",
         "libprofile",
         "libdexfile",
         "libartbase",
@@ -53,13 +53,33 @@
         "profman-defaults",
     ],
     shared_libs: [
-        "libartd",
         "libprofiled",
         "libdexfiled",
         "libartbased",
     ],
 }
 
+art_cc_binary {
+    name: "profmans",
+    defaults: [
+        "profman-defaults",
+        "libprofile_static_defaults",
+        "libdexfile_static_defaults",
+        "libartbase_static_defaults",
+    ],
+    host_supported: true,
+    device_supported: false,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+        windows: {
+            enabled: true,
+            cflags: ["-Wno-thread-safety"],
+        },
+    },
+}
+
 art_cc_test {
     name: "art_profman_tests",
     defaults: [
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index 6715680..4d8eef9 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -38,7 +38,7 @@
     // Avoid merging classes since we may want to only add classes that fit a certain criteria.
     // If we merged the classes, every single class in each profile would be in the out_profile,
     // but we want to only included classes that are in at least a few profiles.
-    out_profile->MergeWith(*profile, /*merge_classes*/ false);
+    out_profile->MergeWith(*profile, /*merge_classes=*/ false);
   }
 
   // Image classes that were added because they are commonly used.
@@ -96,7 +96,7 @@
               is_clean = false;
             }
           },
-          /*instance_fields*/ VoidFunctor(),
+          /*instance_field_visitor=*/ VoidFunctor(),
           method_visitor,
           method_visitor);
 
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b509fb4..b65bb43 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -32,16 +32,23 @@
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
         const std::vector<ScopedFlock>& profile_files,
         const ScopedFlock& reference_profile_file,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   DCHECK(!profile_files.empty());
 
   ProfileCompilationInfo info;
   // Load the reference profile.
-  if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
+  if (!info.Load(reference_profile_file->Fd(), /*merge_classes=*/ true, filter_fn)) {
     LOG(WARNING) << "Could not load reference profile file";
     return kErrorBadProfiles;
   }
 
+  // If we need to store aggregation counters (e.g. for the boot image profile),
+  // prepare the reference profile now.
+  if (store_aggregation_counters) {
+    info.PrepareForAggregationCounters();
+  }
+
   // Store the current state of the reference profile before merging with the current profiles.
   uint32_t number_of_methods = info.GetNumberOfMethods();
   uint32_t number_of_classes = info.GetNumberOfResolvedClasses();
@@ -49,7 +56,7 @@
   // Merge all current profiles.
   for (size_t i = 0; i < profile_files.size(); i++) {
     ProfileCompilationInfo cur_info;
-    if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
+    if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes=*/ true, filter_fn)) {
       LOG(WARNING) << "Could not load profile file at index " << i;
       return kErrorBadProfiles;
     }
@@ -92,7 +99,7 @@
   // Will block until all the locks are acquired.
   bool Init(const std::vector<std::string>& filenames, /* out */ std::string* error) {
     for (size_t i = 0; i < filenames.size(); i++) {
-      flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block */ true, error);
+      flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block= */ true, error);
       if (flocks_[i].get() == nullptr) {
         *error += " (index=" + std::to_string(i) + ")";
         return false;
@@ -106,7 +113,7 @@
     for (size_t i = 0; i < fds.size(); i++) {
       DCHECK_GE(fds[i], 0);
       flocks_[i] = LockedFile::DupOf(fds[i], "profile-file",
-                                     true /* read_only_mode */, error);
+                                     /* read_only_mode= */ true, error);
       if (flocks_[i].get() == nullptr) {
         *error += " (index=" + std::to_string(i) + ")";
         return false;
@@ -124,7 +131,8 @@
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
         const std::vector<int>& profile_files_fd,
         int reference_profile_file_fd,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   DCHECK_GE(reference_profile_file_fd, 0);
 
   std::string error;
@@ -138,7 +146,7 @@
   // cleared after processing.
   ScopedFlock reference_profile_file = LockedFile::DupOf(reference_profile_file_fd,
                                                          "reference-profile",
-                                                         false /* read_only_mode */,
+                                                         /* read_only_mode= */ false,
                                                          &error);
   if (reference_profile_file.get() == nullptr) {
     LOG(WARNING) << "Could not lock reference profiled files: " << error;
@@ -147,13 +155,15 @@
 
   return ProcessProfilesInternal(profile_files.Get(),
                                  reference_profile_file,
-                                 filter_fn);
+                                 filter_fn,
+                                 store_aggregation_counters);
 }
 
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
         const std::vector<std::string>& profile_files,
         const std::string& reference_profile_file,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   std::string error;
 
   ScopedFlockList profile_files_list(profile_files.size());
@@ -163,7 +173,7 @@
   }
 
   ScopedFlock locked_reference_profile_file = LockedFile::Open(
-      reference_profile_file.c_str(), O_RDWR, /* block */ true, &error);
+      reference_profile_file.c_str(), O_RDWR, /* block= */ true, &error);
   if (locked_reference_profile_file.get() == nullptr) {
     LOG(WARNING) << "Could not lock reference profile files: " << error;
     return kErrorCannotLock;
@@ -171,7 +181,8 @@
 
   return ProcessProfilesInternal(profile_files_list.Get(),
                                  locked_reference_profile_file,
-                                 filter_fn);
+                                 filter_fn,
+                                 store_aggregation_counters);
 }
 
 }  // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index c1d6f8e..45d4e38 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -55,19 +55,22 @@
       const std::vector<std::string>& profile_files,
       const std::string& reference_profile_file,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
-          = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+          = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+      bool store_aggregation_counters = false);
 
   static ProcessingResult ProcessProfiles(
       const std::vector<int>& profile_files_fd_,
       int reference_profile_file_fd,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
-          = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+          = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+      bool store_aggregation_counters = false);
 
  private:
   static ProcessingResult ProcessProfilesInternal(
       const std::vector<ScopedFlock>& profile_files,
       const ScopedFlock& reference_profile_file,
-      const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
+      const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+      bool store_aggregation_counters);
 
   DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
 };
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 286b686..e906151 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -102,7 +102,7 @@
       }
     }
     for (uint16_t i = 0; i < number_of_classes; i++) {
-      ASSERT_TRUE(info->AddClassIndex(dex_location1,
+      ASSERT_TRUE(info->AddClassIndex(ProfileCompilationInfo::GetProfileDexFileKey(dex_location1),
                                       dex_location_checksum1,
                                       dex::TypeIndex(i),
                                       number_of_methods1));
@@ -116,9 +116,9 @@
   void SetupBasicProfile(const std::string& id,
                          uint32_t checksum,
                          uint16_t number_of_methods,
-                         const std::vector<uint32_t> hot_methods,
-                         const std::vector<uint32_t> startup_methods,
-                         const std::vector<uint32_t> post_startup_methods,
+                         const std::vector<uint32_t>& hot_methods,
+                         const std::vector<uint32_t>& startup_methods,
+                         const std::vector<uint32_t>& post_startup_methods,
                          const ScratchFile& profile,
                          ProfileCompilationInfo* info) {
     std::string dex_location = "location1" + id;
@@ -720,7 +720,7 @@
   ASSERT_TRUE(info.Load(GetFd(profile_file)));
   // Verify that the profile has matching methods.
   ScopedObjectAccess soa(Thread::Current());
-  ObjPtr<mirror::Class> klass = GetClass(soa, /* class_loader */ nullptr, "Ljava/lang/Math;");
+  ObjPtr<mirror::Class> klass = GetClass(soa, /* class_loader= */ nullptr, "Ljava/lang/Math;");
   ASSERT_TRUE(klass != nullptr);
   size_t method_count = 0;
   for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
@@ -932,8 +932,8 @@
     AssertInlineCaches(inline_monomorphic,
                        expected_monomorphic,
                        info,
-                       /*megamorphic*/false,
-                       /*missing_types*/false);
+                       /*is_megamorphic=*/false,
+                       /*is_missing_types=*/false);
   }
 
   {
@@ -949,8 +949,8 @@
     AssertInlineCaches(inline_polymorhic,
                        expected_polymorphic,
                        info,
-                       /*megamorphic*/false,
-                       /*missing_types*/false);
+                       /*is_megamorphic=*/false,
+                       /*is_missing_types=*/false);
   }
 
   {
@@ -963,8 +963,8 @@
     AssertInlineCaches(inline_megamorphic,
                        expected_megamorphic,
                        info,
-                       /*megamorphic*/true,
-                       /*missing_types*/false);
+                       /*is_megamorphic=*/true,
+                       /*is_missing_types=*/false);
   }
 
   {
@@ -977,8 +977,8 @@
     AssertInlineCaches(inline_missing_types,
                        expected_missing_Types,
                        info,
-                       /*megamorphic*/false,
-                       /*missing_types*/true);
+                       /*is_megamorphic=*/false,
+                       /*is_missing_types=*/true);
   }
 
   {
@@ -1005,7 +1005,7 @@
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   ProfileCompilationInfo info1;
   SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1,
-      /*start_method_index*/0, /*reverse_dex_write_order*/false);
+      /*start_method_index=*/0, /*reverse_dex_write_order=*/false);
 
   // The reference profile info will contain the methods with indices 50-150.
   // When setting up the profile reverse the order in which the dex files
@@ -1014,7 +1014,7 @@
   const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
   ProfileCompilationInfo reference_info;
   SetupProfile("p1", 1, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
-      &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order*/true);
+      &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order=*/true);
 
   // We should advise compilation.
   ASSERT_EQ(ProfileAssistant::kCompile,
@@ -1192,7 +1192,7 @@
 
   // Run profman and pass the dex file with --apk-fd.
   android::base::unique_fd apk_fd(
-      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));  // NOLINT
   ASSERT_GE(apk_fd.get(), 0);
 
   std::string profman_cmd = GetProfmanCmd();
@@ -1233,9 +1233,9 @@
   ProfileCompilationInfo info2_filter;
   ProfileCompilationInfo expected;
 
-  info2_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
-  info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
-  expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+  info2_filter.Load(profile1.GetFd(), /*merge_classes=*/ true, filter_fn);
+  info2_filter.Load(profile2.GetFd(), /*merge_classes=*/ true, filter_fn);
+  expected.Load(reference_profile.GetFd(), /*merge_classes=*/ true, filter_fn);
 
   ASSERT_TRUE(expected.MergeWith(info1_filter));
   ASSERT_TRUE(expected.MergeWith(info2_filter));
@@ -1260,17 +1260,17 @@
                "fake-location2",
                d2.GetLocationChecksum(),
                num_methods_to_add,
-               /*num_classes*/ 0,
+               /*number_of_classes=*/ 0,
                profile1,
                &info1,
-               /*start_method_index*/ 0,
-               /*reverse_dex_write_order*/ false,
-               /*number_of_methods1*/ d1.NumMethodIds(),
-               /*number_of_methods2*/ d2.NumMethodIds());
+               /*start_method_index=*/ 0,
+               /*reverse_dex_write_order=*/ false,
+               /*number_of_methods1=*/ d1.NumMethodIds(),
+               /*number_of_methods2=*/ d2.NumMethodIds());
 
   // Run profman and pass the dex file with --apk-fd.
   android::base::unique_fd apk_fd(
-      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));  // NOLINT
   ASSERT_GE(apk_fd.get(), 0);
 
   std::string profman_cmd = GetProfmanCmd();
@@ -1300,4 +1300,57 @@
   }
 }
 
+TEST_F(ProfileAssistantTest, MergeProfilesWithCounters) {
+  ScratchFile profile1;
+  ScratchFile profile2;
+  ScratchFile reference_profile;
+
+  // The new profile info will contain methods with indices 0-100.
+  const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+  const uint16_t kNumberOfClasses = 50;
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+  const DexFile& d1 = *dex_files[0];
+  const DexFile& d2 = *dex_files[1];
+  ProfileCompilationInfo info1;
+  SetupProfile(
+      d1.GetLocation(), d1.GetLocationChecksum(),
+      d2.GetLocation(), d2.GetLocationChecksum(),
+      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile1, &info1);
+  ProfileCompilationInfo info2;
+  SetupProfile(
+      d1.GetLocation(), d1.GetLocationChecksum(),
+      d2.GetLocation(), d2.GetLocationChecksum(),
+      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile2, &info2);
+
+  std::string profman_cmd = GetProfmanCmd();
+  std::vector<std::string> argv_str;
+  argv_str.push_back(profman_cmd);
+  argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+  argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+  argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+  argv_str.push_back("--store-aggregation-counters");
+  std::string error;
+
+  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+  // Verify that we can load the result and that the counters are in place.
+
+  ProfileCompilationInfo result;
+  result.PrepareForAggregationCounters();
+  ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(result.Load(reference_profile.GetFd()));
+
+  ASSERT_TRUE(result.StoresAggregationCounters());
+  ASSERT_EQ(2, result.GetAggregationCounter());
+
+  for (uint16_t i = 0; i < kNumberOfMethodsToEnableCompilation; i++) {
+    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d1, i)));
+    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d2, i)));
+  }
+  for (uint16_t i = 0; i < kNumberOfClasses; i++) {
+    ASSERT_EQ(1, result.GetClassAggregationCounter(TypeReference(&d1, dex::TypeIndex(i))));
+  }
+}
+
 }  // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index cecd3c2..82d9df0 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -35,6 +35,7 @@
 #include "base/logging.h"  // For InitLogging.
 #include "base/mem_map.h"
 #include "base/scoped_flock.h"
+#include "base/stl_util.h"
 #include "base/stringpiece.h"
 #include "base/time_utils.h"
 #include "base/unix_file/fd_file.h"
@@ -59,6 +60,7 @@
 
 static std::string CommandLine() {
   std::vector<std::string> command;
+  command.reserve(original_argc);
   for (int i = 0; i < original_argc; ++i) {
     command.push_back(original_argv[i]);
   }
@@ -155,6 +157,9 @@
   UsageError("      the file passed with --profile-fd(file) to the profile passed with");
   UsageError("      --reference-profile-fd(file) and update at the same time the profile-key");
   UsageError("      of entries corresponding to the apks passed with --apk(-fd).");
+  UsageError("  --store-aggregation-counters: if present, profman will compute and store");
+  UsageError("      the aggregation counters of classes and methods in the output profile.");
+  UsageError("      In this case the profile will have a different version.");
   UsageError("");
 
   exit(EXIT_FAILURE);
@@ -198,7 +203,8 @@
       test_profile_class_percentage_(kDefaultTestProfileClassPercentage),
       test_profile_seed_(NanoTime()),
       start_ns_(NanoTime()),
-      copy_and_update_profile_key_(false) {}
+      copy_and_update_profile_key_(false),
+      store_aggregation_counters_(false) {}
 
   ~ProfMan() {
     LogCompletionTime();
@@ -285,6 +291,8 @@
         ParseUintOption(option, "--generate-test-profile-seed", &test_profile_seed_, Usage);
       } else if (option.starts_with("--copy-and-update-profile-key")) {
         copy_and_update_profile_key_ = true;
+      } else if (option.starts_with("--store-aggregation-counters")) {
+        store_aggregation_counters_ = true;
       } else {
         Usage("Unknown argument '%s'", option.data());
       }
@@ -361,12 +369,14 @@
       File file(reference_profile_file_fd_, false);
       result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
                                                  reference_profile_file_fd_,
-                                                 filter_fn);
+                                                 filter_fn,
+                                                 store_aggregation_counters_);
       CloseAllFds(profile_files_fd_, "profile_files_fd_");
     } else {
       result = ProfileAssistant::ProcessProfiles(profile_files_,
                                                  reference_profile_file_,
-                                                 filter_fn);
+                                                 filter_fn,
+                                                 store_aggregation_counters_);
     }
     return result;
   }
@@ -426,7 +436,7 @@
       if (use_apk_fd_list) {
         if (dex_file_loader.OpenZip(apks_fd_[i],
                                     dex_locations_[i],
-                                    /* verify */ false,
+                                    /* verify= */ false,
                                     kVerifyChecksum,
                                     &error_msg,
                                     &dex_files_for_location)) {
@@ -437,7 +447,7 @@
       } else {
         if (dex_file_loader.Open(apk_files_[i].c_str(),
                                  dex_locations_[i],
-                                 /* verify */ false,
+                                 /* verify= */ false,
                                  kVerifyChecksum,
                                  &error_msg,
                                  &dex_files_for_location)) {
@@ -457,6 +467,10 @@
   // The methods reads the links from /proc/self/fd/ to find the original apk paths
   // and puts them in the dex_locations_ vector.
   bool ComputeDexLocationsFromApkFds() {
+#ifdef _WIN32
+    PLOG(ERROR) << "ComputeDexLocationsFromApkFds is unsupported on Windows.";
+    return false;
+#else
     // We can't use a char array of PATH_MAX size without exceeding the frame size.
     // So we use a vector as the buffer for the path.
     std::vector<char> buffer(PATH_MAX, 0);
@@ -472,11 +486,17 @@
       dex_locations_.push_back(buffer.data());
     }
     return true;
+#endif
   }
 
   std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
     if (!filename.empty()) {
-      fd = open(filename.c_str(), O_RDWR);
+#ifdef _WIN32
+      int flags = O_RDWR;
+#else
+      int flags = O_RDWR | O_CLOEXEC;
+#endif
+      fd = open(filename.c_str(), flags);
       if (fd < 0) {
         LOG(ERROR) << "Cannot open " << filename << strerror(errno);
         return nullptr;
@@ -500,7 +520,7 @@
       LOG(ERROR) << "Cannot load profile info from filename=" << filename << " fd=" << fd;
       return -1;
     }
-    *dump += banner + "\n" + info->DumpInfo(dex_files) + "\n";
+    *dump += banner + "\n" + info->DumpInfo(MakeNonOwningPointerVector(*dex_files)) + "\n";
     return 0;
   }
 
@@ -513,10 +533,23 @@
     static const char* kEmptyString = "";
     static const char* kOrdinaryProfile = "=== profile ===";
     static const char* kReferenceProfile = "=== reference profile ===";
+    static const char* kDexFiles = "=== Dex files  ===";
 
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     OpenApkFilesFromLocations(&dex_files);
+
     std::string dump;
+
+    // Dump dex files and corresponding checksums.
+    dump += kDexFiles;
+    dump += "\n";
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      std::ostringstream oss;
+      oss << dex_file->GetLocation()
+          << " [checksum=" << std::hex << dex_file->GetLocationChecksum() << "]\n";
+      dump += oss.str();
+    }
+
     // Dump individual profile files.
     if (!profile_files_fd_.empty()) {
       for (int profile_file_fd : profile_files_fd_) {
@@ -530,12 +563,10 @@
         }
       }
     }
-    if (!profile_files_.empty()) {
-      for (const std::string& profile_file : profile_files_) {
-        int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
-        if (ret != 0) {
-          return ret;
-        }
+    for (const std::string& profile_file : profile_files_) {
+      int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
+      if (ret != 0) {
+        return ret;
       }
     }
     // Dump reference profile file.
@@ -562,7 +593,7 @@
     if (!FdIsValid(dump_output_to_fd_)) {
       std::cout << dump;
     } else {
-      unix_file::FdFile out_fd(dump_output_to_fd_, false /*check_usage*/);
+      unix_file::FdFile out_fd(dump_output_to_fd_, /*check_usage=*/ false);
       if (!out_fd.WriteFully(dump.c_str(), dump.length())) {
         return -1;
       }
@@ -594,14 +625,14 @@
                                             &startup_methods,
                                             &post_startup_methods)) {
         for (const dex::TypeIndex& type_index : class_types) {
-          const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
+          const dex::TypeId& type_id = dex_file->GetTypeId(type_index);
           out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
         }
         combined_methods = hot_methods;
         combined_methods.insert(startup_methods.begin(), startup_methods.end());
         combined_methods.insert(post_startup_methods.begin(), post_startup_methods.end());
         for (uint16_t dex_method_idx : combined_methods) {
-          const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
+          const dex::MethodId& id = dex_file->GetMethodId(dex_method_idx);
           std::string signature_string(dex_file->GetMethodSignature(id).ToString());
           std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
           std::string method_name(dex_file->GetMethodName(id));
@@ -629,7 +660,12 @@
   bool GetClassNamesAndMethods(const std::string& profile_file,
                                std::vector<std::unique_ptr<const DexFile>>* dex_files,
                                std::set<std::string>* out_lines) {
-    int fd = open(profile_file.c_str(), O_RDONLY);
+#ifdef _WIN32
+    int flags = O_RDONLY;
+#else
+    int flags = O_RDONLY | O_CLOEXEC;
+#endif
+    int fd = open(profile_file.c_str(), flags);
     if (!FdIsValid(fd)) {
       LOG(ERROR) << "Cannot open " << profile_file << strerror(errno);
       return false;
@@ -688,7 +724,7 @@
     if (!FdIsValid(dump_output_to_fd_)) {
       std::cout << dump;
     } else {
-      unix_file::FdFile out_fd(dump_output_to_fd_, false /*check_usage*/);
+      unix_file::FdFile out_fd(dump_output_to_fd_, /*check_usage=*/ false);
       if (!out_fd.WriteFully(dump.c_str(), dump.length())) {
         return -1;
       }
@@ -761,7 +797,7 @@
         }
       }
 
-      const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
+      const dex::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
       if (type_id == nullptr) {
         continue;
       }
@@ -797,7 +833,7 @@
     const std::string& name = name_and_signature[0];
     const std::string& signature = kProfileParsingFirstCharInSignature + name_and_signature[1];
 
-    const DexFile::StringId* name_id = dex_file->FindStringId(name.c_str());
+    const dex::StringId* name_id = dex_file->FindStringId(name.c_str());
     if (name_id == nullptr) {
       LOG(WARNING) << "Could not find name: "  << name;
       return dex::kDexNoIndex;
@@ -808,12 +844,12 @@
       LOG(WARNING) << "Could not create type list" << signature;
       return dex::kDexNoIndex;
     }
-    const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
+    const dex::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
     if (proto_id == nullptr) {
       LOG(WARNING) << "Could not find proto_id: " << name;
       return dex::kDexNoIndex;
     }
-    const DexFile::MethodId* method_id = dex_file->FindMethodId(
+    const dex::MethodId* method_id = dex_file->FindMethodId(
         dex_file->GetTypeId(class_ref.TypeIndex()), *name_id, *proto_id);
     if (method_id == nullptr) {
       LOG(WARNING) << "Could not find method_id: " << name;
@@ -836,7 +872,7 @@
     uint32_t offset = dex_file->FindCodeItemOffset(
         *dex_file->FindClassDef(class_ref.TypeIndex()),
         method_index);
-    const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
+    const dex::CodeItem* code_item = dex_file->GetCodeItem(offset);
 
     bool found_invoke = false;
     for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(*dex_file, code_item)) {
@@ -912,7 +948,7 @@
       flags |= ProfileCompilationInfo::MethodHotness::kFlagPostStartup;
     }
 
-    TypeReference class_ref(/* dex_file */ nullptr, dex::TypeIndex());
+    TypeReference class_ref(/* dex_file= */ nullptr, dex::TypeIndex());
     if (!FindClass(dex_files, klass, &class_ref)) {
       LOG(WARNING) << "Could not find class: " << klass;
       return false;
@@ -981,7 +1017,7 @@
         return false;
       }
       std::vector<TypeReference> classes(inline_cache_elems.size(),
-                                         TypeReference(/* dex_file */ nullptr, dex::TypeIndex()));
+                                         TypeReference(/* dex_file= */ nullptr, dex::TypeIndex()));
       size_t class_it = 0;
       for (const std::string& ic_class : inline_cache_elems) {
         if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
@@ -1010,7 +1046,12 @@
     int fd = reference_profile_file_fd_;
     if (!FdIsValid(fd)) {
       CHECK(!reference_profile_file_.empty());
-      fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+#ifdef _WIN32
+      int flags = O_CREAT | O_TRUNC | O_WRONLY;
+#else
+      int flags = O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC;
+#endif
+      fd = open(reference_profile_file_.c_str(), flags, 0644);
       if (fd < 0) {
         LOG(ERROR) << "Cannot open " << reference_profile_file_ << strerror(errno);
         return kInvalidFd;
@@ -1143,7 +1184,12 @@
       }
     }
     // ShouldGenerateTestProfile confirms !test_profile_.empty().
-    int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+#ifdef _WIN32
+    int flags = O_CREAT | O_TRUNC | O_WRONLY;
+#else
+    int flags = O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC;
+#endif
+    int profile_test_fd = open(test_profile_.c_str(), flags, 0644);
     if (profile_test_fd < 0) {
       LOG(ERROR) << "Cannot open " << test_profile_ << strerror(errno);
       return -1;
@@ -1201,7 +1247,7 @@
     // Do not clear if invalid. The input might be an archive.
     bool load_ok = use_fds
         ? profile.Load(profile_files_fd_[0])
-        : profile.Load(profile_files_[0], /*clear_if_invalid*/ false);
+        : profile.Load(profile_files_[0], /*clear_if_invalid=*/ false);
     if (load_ok) {
       // Open the dex files to look up classes and methods.
       std::vector<std::unique_ptr<const DexFile>> dex_files;
@@ -1211,7 +1257,7 @@
       }
       bool result = use_fds
           ? profile.Save(reference_profile_file_fd_)
-          : profile.Save(reference_profile_file_, /*bytes_written*/ nullptr);
+          : profile.Save(reference_profile_file_, /*bytes_written=*/ nullptr);
       return result ? 0 : kErrorFailedToSaveProfile;
     } else {
       return kErrorFailedToLoadProfile;
@@ -1264,6 +1310,7 @@
   uint32_t test_profile_seed_;
   uint64_t start_ns_;
   bool copy_and_update_profile_key_;
+  bool store_aggregation_counters_;
 };
 
 // See ProfileAssistant::ProcessingResult for return codes.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 6ec6265..a08ba70 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -33,6 +33,7 @@
         "art_method.cc",
         "backtrace_helper.cc",
         "barrier.cc",
+        "base/locks.cc",
         "base/mem_map_arena_pool.cc",
         "base/mutex.cc",
         "base/quasi_atomic.cc",
@@ -93,9 +94,13 @@
         "instrumentation.cc",
         "intern_table.cc",
         "interpreter/interpreter.cc",
+        "interpreter/interpreter_cache.cc",
         "interpreter/interpreter_common.cc",
         "interpreter/interpreter_intrinsics.cc",
-        "interpreter/interpreter_switch_impl.cc",
+        "interpreter/interpreter_switch_impl0.cc",
+        "interpreter/interpreter_switch_impl1.cc",
+        "interpreter/interpreter_switch_impl2.cc",
+        "interpreter/interpreter_switch_impl3.cc",
         "interpreter/lock_count_data.cc",
         "interpreter/shadow_frame.cc",
         "interpreter/unstarted_runtime.cc",
@@ -239,14 +244,11 @@
         "entrypoints/quick/quick_trampoline_entrypoints.cc",
     ],
 
-    // b/77976998, clang lld does not recognize the --keep-unique flag.
-    use_clang_lld: false,
-
     arch: {
         arm: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_arm.S",
+                ":libart_mterp.arm",
                 "arch/arm/context_arm.cc",
                 "arch/arm/entrypoints_init_arm.cc",
                 "arch/arm/instruction_set_features_assembly_tests.S",
@@ -261,7 +263,7 @@
         arm64: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_arm64.S",
+                ":libart_mterp.arm64",
                 "arch/arm64/context_arm64.cc",
                 "arch/arm64/entrypoints_init_arm64.cc",
                 "arch/arm64/jni_entrypoints_arm64.S",
@@ -275,7 +277,7 @@
         x86: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_x86.S",
+                ":libart_mterp.x86",
                 "arch/x86/context_x86.cc",
                 "arch/x86/entrypoints_init_x86.cc",
                 "arch/x86/jni_entrypoints_x86.S",
@@ -290,7 +292,7 @@
                 // Note that the fault_handler_x86.cc is not a mistake.  This file is
                 // shared between the x86 and x86_64 architectures.
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_x86_64.S",
+                ":libart_mterp.x86_64",
                 "arch/x86_64/context_x86_64.cc",
                 "arch/x86_64/entrypoints_init_x86_64.cc",
                 "arch/x86_64/jni_entrypoints_x86_64.S",
@@ -304,7 +306,7 @@
         mips: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_mips.S",
+                ":libart_mterp.mips",
                 "arch/mips/context_mips.cc",
                 "arch/mips/entrypoints_init_mips.cc",
                 "arch/mips/jni_entrypoints_mips.S",
@@ -317,7 +319,7 @@
         mips64: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
-                "interpreter/mterp/out/mterp_mips64.S",
+                ":libart_mterp.mips64",
                 "arch/mips64/context_mips64.cc",
                 "arch/mips64/entrypoints_init_mips64.cc",
                 "arch/mips64/jni_entrypoints_mips64.S",
@@ -343,6 +345,11 @@
             static_libs: [
                 "libz",  // For adler32.
             ],
+            cflags: [
+                // ART is allowed to link to libicuuc directly
+                // since they are in the same module
+                "-DANDROID_LINK_SHARED_ICU4C",
+            ],
         },
         android_arm: {
             ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
@@ -375,23 +382,21 @@
     export_generated_headers: ["cpp-define-generator-asm-support"],
     include_dirs: [
         "art/sigchainlib",
-        "external/icu/icu4c/source/common",
-        "external/lz4/lib",
         "external/zlib",
     ],
     header_libs: [
         "art_cmdlineparser_headers",
+        "cpp-define-generator-definitions",
+        "libicuuc_headers",
         "libnativehelper_header_only",
         "jni_platform_headers",
     ],
     shared_libs: [
+        "libartpalette",
         "libnativebridge",
         "libnativeloader",
         "libbacktrace",
-        "liblz4",
         "liblog",
-        // For atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
-        "libcutils",
         // For common macros.
         "libbase",
     ],
@@ -408,12 +413,56 @@
     export_shared_lib_headers: ["libbase"],
 }
 
+libart_static_cc_defaults {
+    name: "libart_static_base_defaults",
+    target: {
+        android: {
+            static_libs: ["libtombstoned_client_static"],
+        },
+    },
+    static_libs: [
+        "libartpalette",
+        "libbacktrace",
+        "libbase",
+        "libdexfile_external",  // libunwindstack dependency
+        "libdexfile_support",  // libunwindstack dependency
+        "liblog",
+        "libnativebridge",
+        "libnativeloader",
+        "libunwindstack",
+        "libz",
+    ],
+}
+
+cc_defaults {
+    name: "libart_static_defaults",
+    defaults: [
+        "libart_static_base_defaults",
+        "libartbase_static_defaults",
+        "libdexfile_static_defaults",
+        "libprofile_static_defaults",
+    ],
+    static_libs: ["libart"],
+}
+
+cc_defaults {
+    name: "libartd_static_defaults",
+    defaults: [
+        "libart_static_base_defaults",
+        "libartbased_static_defaults",
+        "libdexfiled_static_defaults",
+        "libprofiled_static_defaults",
+    ],
+    static_libs: ["libartd"],
+}
+
 gensrcs {
     name: "art_operator_srcs",
     cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
     tools: ["generate_operator_out"],
     srcs: [
-        "base/mutex.h",
+        "base/callee_save_type.h",
+        "base/locks.h",
         "class_loader_context.h",
         "class_status.h",
         "debugger.h",
@@ -511,9 +560,6 @@
     header_libs: [
         "libnativehelper_header_only",
     ],
-    include_dirs: [
-        "external/icu/icu4c/source/common",
-    ],
 }
 
 art_cc_test {
@@ -616,8 +662,7 @@
     ],
     shared_libs: [
         "libartd-compiler",
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
     ],
 }
 
@@ -626,3 +671,51 @@
     host_supported: true,
     export_include_dirs: ["."],
 }
+
+genrule {
+  name: "libart_mterp.arm",
+  out: ["mterp_arm.S"],
+  srcs: ["interpreter/mterp/arm/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+  name: "libart_mterp.arm64",
+  out: ["mterp_arm64.S"],
+  srcs: ["interpreter/mterp/arm64/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+  name: "libart_mterp.mips",
+  out: ["mterp_mips.S"],
+  srcs: ["interpreter/mterp/mips/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+  name: "libart_mterp.mips64",
+  out: ["mterp_mips64.S"],
+  srcs: ["interpreter/mterp/mips64/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+  name: "libart_mterp.x86",
+  out: ["mterp_x86.S"],
+  srcs: ["interpreter/mterp/x86/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+  name: "libart_mterp.x86_64",
+  out: ["mterp_x86_64.S"],
+  srcs: ["interpreter/mterp/x86_64/*.S"],
+  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4dbbf9..12ad84b 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -22,26 +22,6 @@
 #include "common_runtime_test.h"
 #include "quick/quick_method_frame_info.h"
 
-// asm_support.h declares tests next to the #defines. We use asm_support_check.h to (safely)
-// generate CheckAsmSupportOffsetsAndSizes using gtest's EXPECT for the tests. We also use the
-// RETURN_TYPE, HEADER and FOOTER defines from asm_support_check.h to try to ensure that any
-// tests are actually generated.
-
-// Let CheckAsmSupportOffsetsAndSizes return a size_t (the count).
-#define ASM_SUPPORT_CHECK_RETURN_TYPE size_t
-
-// Declare the counter that will be updated per test.
-#define ASM_SUPPORT_CHECK_HEADER size_t count = 0;
-
-// Use EXPECT_EQ for tests, and increment the counter.
-#define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y); count++;
-
-// Return the counter at the end of CheckAsmSupportOffsetsAndSizes.
-#define ASM_SUPPORT_CHECK_FOOTER return count;
-
-// Generate CheckAsmSupportOffsetsAndSizes().
-#include "asm_support_check.h"
-
 namespace art {
 
 class ArchTest : public CommonRuntimeTest {
@@ -60,11 +40,6 @@
   }
 };
 
-TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
-  size_t test_count = CheckAsmSupportOffsetsAndSizes();
-  EXPECT_GT(test_count, 0u);
-}
-
 // Grab architecture specific constants.
 namespace arm {
 #include "arch/arm/asm_support_arm.h"
diff --git a/runtime/arch/arm/callee_save_frame_arm.h b/runtime/arch/arm/callee_save_frame_arm.h
index 11eefb9..72ba3b7 100644
--- a/runtime/arch/arm/callee_save_frame_arm.h
+++ b/runtime/arch/arm/callee_save_frame_arm.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_arm.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace arm {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2c5465e..c1a03ab 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -174,7 +174,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg12 = nullptr;  // Cannot use register 12 (IP) to pass arguments.
   qpoints->pReadBarrierMarkReg13 = nullptr;  // Cannot use register 13 (SP) to pass arguments.
   qpoints->pReadBarrierMarkReg14 = nullptr;  // Cannot use register 14 (LR) to pass arguments.
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index bb33a27..e186cd3 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -18,12 +18,13 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
+#include "runtime_globals.h"
 #include "thread-current-inl.h"
 
 //
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 608999b..fdf4dbd 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -51,9 +51,10 @@
       "cortex-a72",
       "cortex-a73",
       "cortex-a75",
+      "cortex-a76",
       "exynos-m1",
-      "denver",
-      "kryo"
+      "kryo",
+      "kryo385",
   };
   bool has_armv8a = FindVariantInArray(arm_variants_with_armv8a,
                                        arraysize(arm_variants_with_armv8a),
@@ -318,8 +319,9 @@
   bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
   bool has_div = has_div_;
   bool has_armv8a = has_armv8a_;
-  for (auto i = features.begin(); i != features.end(); i++) {
-    std::string feature = android::base::Trim(*i);
+  for (const std::string& feature : features) {
+    DCHECK_EQ(android::base::Trim(feature), feature)
+        << "Feature name is not trimmed: '" << feature << "'";
     if (feature == "div") {
       has_div = true;
     } else if (feature == "-div") {
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index d9651f9..36e31bd 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -46,20 +46,6 @@
   EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
   EXPECT_EQ(kryo_features->AsBitmap(), 7U);
 
-  // Build features for a 32-bit ARM denver processor.
-  std::unique_ptr<const InstructionSetFeatures> denver_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "denver", &error_msg));
-  ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
-  EXPECT_TRUE(denver_features->Equals(denver_features.get()));
-  EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
-  EXPECT_FALSE(krait_features->Equals(denver_features.get()));
-  EXPECT_FALSE(krait_features->HasAtLeast(denver_features.get()));
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
-  EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
-  EXPECT_EQ(denver_features->AsBitmap(), 7U);
-
   // Build features for a 32-bit ARMv7 processor.
   std::unique_ptr<const InstructionSetFeatures> generic_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
diff --git a/runtime/arch/arm64/callee_save_frame_arm64.h b/runtime/arch/arm64/callee_save_frame_arm64.h
index bc36bfa..d3609f1 100644
--- a/runtime/arch/arm64/callee_save_frame_arm64.h
+++ b/runtime/arch/arm64/callee_save_frame_arm64.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_arm64.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace arm64 {
@@ -54,7 +54,7 @@
     (1 << art::arm64::X9) | (1 << art::arm64::X10) | (1 << art::arm64::X11) |
     (1 << art::arm64::X12) | (1 << art::arm64::X13) | (1 << art::arm64::X14) |
     (1 << art::arm64::X15) | (1 << art::arm64::X16) | (1 << art::arm64::X17) |
-    (1 << art::arm64::X18) | (1 << art::arm64::X19);
+    (1 << art::arm64::X19);
 
 static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
 static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 4c43b7e..22f0c28 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -103,7 +103,6 @@
   qpoints->pReadBarrierMarkReg14 = is_active ? art_quick_read_barrier_mark_reg14 : nullptr;
   qpoints->pReadBarrierMarkReg15 = is_active ? art_quick_read_barrier_mark_reg15 : nullptr;
   qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
-  qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
   qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
   qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
   qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
@@ -190,7 +189,7 @@
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
   qpoints->pReadBarrierMarkReg16 = nullptr;  // IP0 is used as a temp by the asm stub.
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierSlow = artReadBarrierSlow;
   qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
 }
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index e8b4627..751c05b 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -18,13 +18,14 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
 #include "registers_arm64.h"
+#include "runtime_globals.h"
 #include "thread-current-inl.h"
 
 extern "C" void art_quick_throw_stack_overflow();
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index d0f61c9..196f358 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -16,6 +16,11 @@
 
 #include "instruction_set_features_arm64.h"
 
+#if defined(ART_TARGET_ANDROID) && defined(__aarch64__)
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+#endif
+
 #include <fstream>
 #include <sstream>
 
@@ -31,6 +36,10 @@
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg) {
+  // The CPU variant string is passed to ART through --instruction-set-variant option.
+  // During build, such setting is from TARGET_CPU_VARIANT in device BoardConfig.mk, for example:
+  //   TARGET_CPU_VARIANT := cortex-a75
+
   // Look for variants that need a fix for a53 erratum 835769.
   static const char* arm64_variants_with_a53_835769_bug[] = {
       // Pessimistically assume all generic CPUs are cortex-a53.
@@ -39,14 +48,73 @@
       "cortex-a53",
       "cortex-a53.a57",
       "cortex-a53.a72",
-      // Pessimistically assume all "big" cortex CPUs are paired with a cortex-a53.
+      // Pessimistically assume following "big" cortex CPUs are paired with a cortex-a53.
       "cortex-a57",
       "cortex-a72",
       "cortex-a73",
   };
+
+  static const char* arm64_variants_with_crc[] = {
+      "default",
+      "generic",
+      "cortex-a35",
+      "cortex-a53",
+      "cortex-a53.a57",
+      "cortex-a53.a72",
+      "cortex-a57",
+      "cortex-a72",
+      "cortex-a73",
+      "cortex-a55",
+      "cortex-a75",
+      "cortex-a76",
+      "exynos-m1",
+      "exynos-m2",
+      "exynos-m3",
+      "kryo",
+      "kryo385",
+  };
+
+  static const char* arm64_variants_with_lse[] = {
+      "cortex-a55",
+      "cortex-a75",
+      "cortex-a76",
+      "kryo385",
+  };
+
+  static const char* arm64_variants_with_fp16[] = {
+      "cortex-a55",
+      "cortex-a75",
+      "cortex-a76",
+      "kryo385",
+  };
+
+  static const char* arm64_variants_with_dotprod[] = {
+      "cortex-a55",
+      "cortex-a75",
+      "cortex-a76",
+  };
+
   bool needs_a53_835769_fix = FindVariantInArray(arm64_variants_with_a53_835769_bug,
                                                  arraysize(arm64_variants_with_a53_835769_bug),
                                                  variant);
+  // The variants that need a fix for 843419 are the same that need a fix for 835769.
+  bool needs_a53_843419_fix = needs_a53_835769_fix;
+
+  bool has_crc = FindVariantInArray(arm64_variants_with_crc,
+                                    arraysize(arm64_variants_with_crc),
+                                    variant);
+
+  bool has_lse = FindVariantInArray(arm64_variants_with_lse,
+                                    arraysize(arm64_variants_with_lse),
+                                    variant);
+
+  bool has_fp16 = FindVariantInArray(arm64_variants_with_fp16,
+                                     arraysize(arm64_variants_with_fp16),
+                                     variant);
+
+  bool has_dotprod = FindVariantInArray(arm64_variants_with_dotprod,
+                                        arraysize(arm64_variants_with_dotprod),
+                                        variant);
 
   if (!needs_a53_835769_fix) {
     // Check to see if this is an expected variant.
@@ -54,11 +122,12 @@
         "cortex-a35",
         "cortex-a55",
         "cortex-a75",
+        "cortex-a76",
         "exynos-m1",
         "exynos-m2",
         "exynos-m3",
-        "denver64",
-        "kryo"
+        "kryo",
+        "kryo385",
     };
     if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
       std::ostringstream os;
@@ -68,31 +137,91 @@
     }
   }
 
-  // The variants that need a fix for 843419 are the same that need a fix for 835769.
-  bool needs_a53_843419_fix = needs_a53_835769_fix;
-
-  return Arm64FeaturesUniquePtr(
-      new Arm64InstructionSetFeatures(needs_a53_835769_fix, needs_a53_843419_fix));
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+                                                                needs_a53_843419_fix,
+                                                                has_crc,
+                                                                has_lse,
+                                                                has_fp16,
+                                                                has_dotprod));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
   bool is_a53 = (bitmap & kA53Bitfield) != 0;
-  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+  bool has_crc = (bitmap & kCRCBitField) != 0;
+  bool has_lse = (bitmap & kLSEBitField) != 0;
+  bool has_fp16 = (bitmap & kFP16BitField) != 0;
+  bool has_dotprod = (bitmap & kDotProdBitField) != 0;
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53,
+                                                                is_a53,
+                                                                has_crc,
+                                                                has_lse,
+                                                                has_fp16,
+                                                                has_dotprod));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
-  const bool is_a53 = true;  // Pessimistically assume all ARM64s are A53s.
-  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+  // For more details about ARM feature macros, refer to
+  // Arm C Language Extensions Documentation (ACLE).
+  // https://developer.arm.com/docs/101028/latest
+  bool needs_a53_835769_fix = false;
+  bool needs_a53_843419_fix = needs_a53_835769_fix;
+  bool has_crc = false;
+  bool has_lse = false;
+  bool has_fp16 = false;
+  bool has_dotprod = false;
+
+#if defined (__ARM_FEATURE_CRC32)
+  has_crc = true;
+#endif
+
+#if defined (__ARM_ARCH_8_1A__) || defined (__ARM_ARCH_8_2A__)
+  // There is no specific ACLE macro defined for ARMv8.1 LSE features.
+  has_lse = true;
+#endif
+
+#if defined (__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) || defined (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+  has_fp16 = true;
+#endif
+
+#if defined (__ARM_FEATURE_DOTPROD)
+  has_dotprod = true;
+#endif
+
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+                                                                needs_a53_843419_fix,
+                                                                has_crc,
+                                                                has_lse,
+                                                                has_fp16,
+                                                                has_dotprod));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
-  const bool is_a53 = true;  // Conservative default.
-  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+  UNIMPLEMENTED(WARNING);
+  return FromCppDefines();
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() {
-  const bool is_a53 = true;  // Pessimistically assume all ARM64s are A53s.
-  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+  bool needs_a53_835769_fix = false;  // No HWCAP for this.
+  bool needs_a53_843419_fix = false;  // No HWCAP for this.
+  bool has_crc = false;
+  bool has_lse = false;
+  bool has_fp16 = false;
+  bool has_dotprod = false;
+
+#if defined(ART_TARGET_ANDROID) && defined(__aarch64__)
+  uint64_t hwcaps = getauxval(AT_HWCAP);
+  has_crc = hwcaps & HWCAP_CRC32 ? true : false;
+  has_lse = hwcaps & HWCAP_ATOMICS ? true : false;
+  has_fp16 = hwcaps & HWCAP_FPHP ? true : false;
+  has_dotprod = hwcaps & HWCAP_ASIMDDP ? true : false;
+#endif
+
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+                                                                needs_a53_843419_fix,
+                                                                has_crc,
+                                                                has_lse,
+                                                                has_fp16,
+                                                                has_dotprod));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
@@ -106,11 +235,33 @@
   }
   const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
   return fix_cortex_a53_835769_ == other_as_arm64->fix_cortex_a53_835769_ &&
-      fix_cortex_a53_843419_ == other_as_arm64->fix_cortex_a53_843419_;
+      fix_cortex_a53_843419_ == other_as_arm64->fix_cortex_a53_843419_ &&
+      has_crc_ == other_as_arm64->has_crc_ &&
+      has_lse_ == other_as_arm64->has_lse_ &&
+      has_fp16_ == other_as_arm64->has_fp16_ &&
+      has_dotprod_ == other_as_arm64->has_dotprod_;
+}
+
+bool Arm64InstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
+  if (InstructionSet::kArm64 != other->GetInstructionSet()) {
+    return false;
+  }
+  // Currently 'default' feature is cortex-a53 with fixes 835769 and 843419.
+  // Newer CPUs are not required to have such features,
+  // so these two a53 fix features are not tested for HasAtLeast.
+  const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
+  return (has_crc_ || !other_as_arm64->has_crc_)
+      && (has_lse_ || !other_as_arm64->has_lse_)
+      && (has_fp16_ || !other_as_arm64->has_fp16_)
+      && (has_dotprod_ || !other_as_arm64->has_dotprod_);
 }
 
 uint32_t Arm64InstructionSetFeatures::AsBitmap() const {
-  return (fix_cortex_a53_835769_ ? kA53Bitfield : 0);
+  return (fix_cortex_a53_835769_ ? kA53Bitfield : 0)
+      | (has_crc_ ? kCRCBitField : 0)
+      | (has_lse_ ? kLSEBitField: 0)
+      | (has_fp16_ ? kFP16BitField: 0)
+      | (has_dotprod_ ? kDotProdBitField : 0);
 }
 
 std::string Arm64InstructionSetFeatures::GetFeatureString() const {
@@ -120,26 +271,114 @@
   } else {
     result += "-a53";
   }
+  if (has_crc_) {
+    result += ",crc";
+  } else {
+    result += ",-crc";
+  }
+  if (has_lse_) {
+    result += ",lse";
+  } else {
+    result += ",-lse";
+  }
+  if (has_fp16_) {
+    result += ",fp16";
+  } else {
+    result += ",-fp16";
+  }
+  if (has_dotprod_) {
+    result += ",dotprod";
+  } else {
+    result += ",-dotprod";
+  }
   return result;
 }
 
 std::unique_ptr<const InstructionSetFeatures>
 Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
     const std::vector<std::string>& features, std::string* error_msg) const {
+  // This 'features' string is from '--instruction-set-features=' option in ART.
+  // These ARMv8.x feature strings align with those introduced in other compilers:
+  // https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html
+  // User can also use armv8.x-a to select group of features:
+  //   armv8.1-a is equivalent to crc,lse
+  //   armv8.2-a is equivalent to crc,lse,fp16
+  //   armv8.3-a is equivalent to crc,lse,fp16
+  //   armv8.4-a is equivalent to crc,lse,fp16,dotprod
+  // For detailed optional & mandatory features support in armv8.x-a,
+  // please refer to section 'A1.7 ARMv8 architecture extensions' in
+  // ARM Architecture Reference Manual ARMv8 document:
+  // https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs/ddi0487/latest/
+  // arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile/
   bool is_a53 = fix_cortex_a53_835769_;
-  for (auto i = features.begin(); i != features.end(); i++) {
-    std::string feature = android::base::Trim(*i);
+  bool has_crc = has_crc_;
+  bool has_lse = has_lse_;
+  bool has_fp16 = has_fp16_;
+  bool has_dotprod = has_dotprod_;
+  for (const std::string& feature : features) {
+    DCHECK_EQ(android::base::Trim(feature), feature)
+        << "Feature name is not trimmed: '" << feature << "'";
     if (feature == "a53") {
       is_a53 = true;
     } else if (feature == "-a53") {
       is_a53 = false;
+    } else if (feature == "crc") {
+      has_crc = true;
+    } else if (feature == "-crc") {
+      has_crc = false;
+    } else if (feature == "lse") {
+      has_lse = true;
+    } else if (feature == "-lse") {
+      has_lse = false;
+    } else if (feature == "fp16") {
+      has_fp16 = true;
+    } else if (feature == "-fp16") {
+      has_fp16 = false;
+    } else if (feature == "dotprod") {
+      has_dotprod = true;
+    } else if (feature == "-dotprod") {
+      has_dotprod = false;
+    } else if (feature == "armv8.1-a") {
+      has_crc = true;
+      has_lse = true;
+    } else if (feature == "armv8.2-a") {
+      has_crc = true;
+      has_lse = true;
+      has_fp16 = true;
+    } else if (feature == "armv8.3-a") {
+      has_crc = true;
+      has_lse = true;
+      has_fp16 = true;
+    } else if (feature == "armv8.4-a") {
+      has_crc = true;
+      has_lse = true;
+      has_fp16 = true;
+      has_dotprod = true;
     } else {
       *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
       return nullptr;
     }
   }
   return std::unique_ptr<const InstructionSetFeatures>(
-      new Arm64InstructionSetFeatures(is_a53, is_a53));
+      new Arm64InstructionSetFeatures(is_a53,  // erratum 835769
+                                      is_a53,  // erratum 843419
+                                      has_crc,
+                                      has_lse,
+                                      has_fp16,
+                                      has_dotprod));
+}
+
+std::unique_ptr<const InstructionSetFeatures>
+Arm64InstructionSetFeatures::AddRuntimeDetectedFeatures(
+    const InstructionSetFeatures *features) const {
+  const Arm64InstructionSetFeatures *arm64_features = features->AsArm64InstructionSetFeatures();
+  return std::unique_ptr<const InstructionSetFeatures>(
+      new Arm64InstructionSetFeatures(fix_cortex_a53_835769_,
+                                      fix_cortex_a53_843419_,
+                                      arm64_features->has_crc_,
+                                      arm64_features->has_lse_,
+                                      arm64_features->has_fp16_,
+                                      arm64_features->has_dotprod_));
 }
 
 }  // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 163a2d8..432b9ef 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -49,6 +49,11 @@
 
   bool Equals(const InstructionSetFeatures* other) const override;
 
+  // Note that newer CPUs do not have a53 erratum 835769 and 843419,
+  // so the two a53 fix features (fix_cortex_a53_835769 and fix_cortex_a53_843419)
+  // are not tested for HasAtLeast.
+  bool HasAtLeast(const InstructionSetFeatures* other) const override;
+
   InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
@@ -68,6 +73,23 @@
       return fix_cortex_a53_843419_;
   }
 
+  bool HasCRC() const {
+    return has_crc_;
+  }
+
+  bool HasLSE() const {
+    return has_lse_;
+  }
+
+  bool HasFP16() const {
+    return has_fp16_;
+  }
+
+  // Are Dot Product instructions (UDOT/SDOT) available?
+  bool HasDotProd() const {
+    return has_dotprod_;
+  }
+
   virtual ~Arm64InstructionSetFeatures() {}
 
  protected:
@@ -76,20 +98,40 @@
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
                                  std::string* error_msg) const override;
 
+  std::unique_ptr<const InstructionSetFeatures>
+      AddRuntimeDetectedFeatures(const InstructionSetFeatures *features) const override;
+
  private:
-  Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
+  Arm64InstructionSetFeatures(bool needs_a53_835769_fix,
+                              bool needs_a53_843419_fix,
+                              bool has_crc,
+                              bool has_lse,
+                              bool has_fp16,
+                              bool has_dotprod)
       : InstructionSetFeatures(),
         fix_cortex_a53_835769_(needs_a53_835769_fix),
-        fix_cortex_a53_843419_(needs_a53_843419_fix) {
+        fix_cortex_a53_843419_(needs_a53_843419_fix),
+        has_crc_(has_crc),
+        has_lse_(has_lse),
+        has_fp16_(has_fp16),
+        has_dotprod_(has_dotprod) {
   }
 
   // Bitmap positions for encoding features as a bitmap.
   enum {
     kA53Bitfield = 1 << 0,
+    kCRCBitField = 1 << 1,
+    kLSEBitField = 1 << 2,
+    kFP16BitField = 1 << 3,
+    kDotProdBitField = 1 << 4,
   };
 
   const bool fix_cortex_a53_835769_;
   const bool fix_cortex_a53_843419_;
+  const bool has_crc_;      // optional in ARMv8.0, mandatory in ARMv8.1.
+  const bool has_lse_;      // ARMv8.1 Large System Extensions.
+  const bool has_fp16_;     // ARMv8.2 FP16 extensions.
+  const bool has_dotprod_;  // optional in ARMv8.2, mandatory in ARMv8.4.
 
   DISALLOW_COPY_AND_ASSIGN(Arm64InstructionSetFeatures);
 };
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index b946f4f..eef8f08 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -28,32 +28,37 @@
   ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
   EXPECT_EQ(arm64_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
-  EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str());
-  EXPECT_EQ(arm64_features->AsBitmap(), 1U);
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", arm64_features->GetFeatureString().c_str());
+  EXPECT_EQ(arm64_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a57", &error_msg));
   ASSERT_TRUE(cortex_a57_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a57_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
-  EXPECT_STREQ("a53", cortex_a57_features->GetFeatureString().c_str());
-  EXPECT_EQ(cortex_a57_features->AsBitmap(), 1U);
+  EXPECT_TRUE(cortex_a57_features->HasAtLeast(arm64_features.get()));
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a57_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a57_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a73", &error_msg));
   ASSERT_TRUE(cortex_a73_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a73_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a73_features->Equals(cortex_a73_features.get()));
-  EXPECT_STREQ("a53", cortex_a73_features->GetFeatureString().c_str());
-  EXPECT_EQ(cortex_a73_features->AsBitmap(), 1U);
+  EXPECT_TRUE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasCRC());
+  EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a73_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a73_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a35", &error_msg));
   ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a35_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
-  EXPECT_STREQ("-a53", cortex_a35_features->GetFeatureString().c_str());
-  EXPECT_EQ(cortex_a35_features->AsBitmap(), 0U);
+  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", cortex_a35_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a35_features->AsBitmap(), 2U);
 
   std::unique_ptr<const InstructionSetFeatures> kryo_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "kryo", &error_msg));
@@ -62,28 +67,157 @@
   EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
   EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
-  EXPECT_STREQ("-a53", kryo_features->GetFeatureString().c_str());
-  EXPECT_EQ(kryo_features->AsBitmap(), 0U);
+  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", kryo_features->GetFeatureString().c_str());
+  EXPECT_EQ(kryo_features->AsBitmap(), 2U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a55_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a55", &error_msg));
   ASSERT_TRUE(cortex_a55_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a55_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a55_features->Equals(cortex_a55_features.get()));
-  EXPECT_TRUE(cortex_a55_features->Equals(cortex_a35_features.get()));
+  EXPECT_FALSE(cortex_a55_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(cortex_a55_features->Equals(cortex_a57_features.get()));
-  EXPECT_STREQ("-a53", cortex_a55_features->GetFeatureString().c_str());
-  EXPECT_EQ(cortex_a55_features->AsBitmap(), 0U);
+  EXPECT_TRUE(cortex_a35_features->HasAtLeast(arm64_features.get()));
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a55_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a55_features->AsBitmap(), 30U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a75_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a75", &error_msg));
   ASSERT_TRUE(cortex_a75_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a75_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a75_features->Equals(cortex_a75_features.get()));
-  EXPECT_TRUE(cortex_a75_features->Equals(cortex_a35_features.get()));
+  EXPECT_FALSE(cortex_a75_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(cortex_a75_features->Equals(cortex_a57_features.get()));
-  EXPECT_STREQ("-a53", cortex_a75_features->GetFeatureString().c_str());
-  EXPECT_EQ(cortex_a75_features->AsBitmap(), 0U);
+  EXPECT_TRUE(cortex_a75_features->HasAtLeast(arm64_features.get()));
+  EXPECT_TRUE(cortex_a75_features->HasAtLeast(cortex_a55_features.get()));
+  EXPECT_FALSE(cortex_a35_features->HasAtLeast(cortex_a75_features.get()));
+  EXPECT_FALSE(cortex_a75_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+  EXPECT_FALSE(cortex_a75_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+  EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasCRC());
+  EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a75_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a75_features->AsBitmap(), 30U);
+
+  std::unique_ptr<const InstructionSetFeatures> cortex_a76_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a76", &error_msg));
+  ASSERT_TRUE(cortex_a76_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(cortex_a76_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(cortex_a76_features->Equals(cortex_a76_features.get()));
+  EXPECT_FALSE(cortex_a76_features->Equals(cortex_a35_features.get()));
+  EXPECT_FALSE(cortex_a76_features->Equals(cortex_a57_features.get()));
+  EXPECT_TRUE(cortex_a76_features->Equals(cortex_a75_features.get()));
+  EXPECT_TRUE(cortex_a76_features->HasAtLeast(arm64_features.get()));
+  EXPECT_TRUE(cortex_a76_features->HasAtLeast(cortex_a55_features.get()));
+  EXPECT_FALSE(cortex_a35_features->HasAtLeast(cortex_a76_features.get()));
+  EXPECT_FALSE(cortex_a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+  EXPECT_FALSE(cortex_a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+  EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasCRC());
+  EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a76_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a76_features->AsBitmap(), 30U);
+}
+
+TEST(Arm64InstructionSetFeaturesTest, Arm64AddFeaturesFromString) {
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> base_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "generic", &error_msg));
+  ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+  // Build features for a Cortex-A76 processor (with ARMv8.2 and Dot Product extensions support).
+  std::unique_ptr<const InstructionSetFeatures> a76_features(
+      base_features->AddFeaturesFromString("-a53,armv8.2-a,dotprod", &error_msg));
+  ASSERT_TRUE(a76_features.get() != nullptr) << error_msg;
+  ASSERT_EQ(a76_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(a76_features->Equals(a76_features.get()));
+  EXPECT_FALSE(a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+  EXPECT_FALSE(a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+  EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasCRC());
+  EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", a76_features->GetFeatureString().c_str());
+  EXPECT_EQ(a76_features->AsBitmap(), 30U);
+
+  // Build features for a default ARM64 processor.
+  std::unique_ptr<const InstructionSetFeatures> generic_features(
+      base_features->AddFeaturesFromString("default", &error_msg));
+  ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
+  ASSERT_EQ(generic_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(generic_features->Equals(generic_features.get()));
+  EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", generic_features->GetFeatureString().c_str());
+  EXPECT_EQ(generic_features->AsBitmap(), 3U);
+
+  // Build features for an ARM64 processor that supports up to ARMv8.2.
+  std::unique_ptr<const InstructionSetFeatures> armv8_2a_cpu_features(
+      base_features->AddFeaturesFromString("-a53,armv8.2-a", &error_msg));
+  ASSERT_TRUE(armv8_2a_cpu_features.get() != nullptr) << error_msg;
+  ASSERT_EQ(armv8_2a_cpu_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(armv8_2a_cpu_features->Equals(armv8_2a_cpu_features.get()));
+  EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+  EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+  EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasCRC());
+  EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasLSE());
+  EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasFP16());
+  EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasDotProd());
+  EXPECT_STREQ("-a53,crc,lse,fp16,-dotprod", armv8_2a_cpu_features->GetFeatureString().c_str());
+  EXPECT_EQ(armv8_2a_cpu_features->AsBitmap(), 14U);
+}
+
+TEST(Arm64InstructionSetFeaturesTest, IsRuntimeDetectionSupported) {
+  if (kRuntimeISA == InstructionSet::kArm64) {
+    EXPECT_TRUE(InstructionSetFeatures::IsRuntimeDetectionSupported());
+  }
+}
+
+TEST(Arm64InstructionSetFeaturesTest, FeaturesFromRuntimeDetection) {
+  if (kRuntimeISA != InstructionSet::kArm64) {
+    return;
+  }
+
+  std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+      InstructionSetFeatures::FromHwcap());
+  std::unique_ptr<const InstructionSetFeatures> runtime_detected_features(
+      InstructionSetFeatures::FromRuntimeDetection());
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  EXPECT_NE(runtime_detected_features, nullptr);
+  EXPECT_TRUE(InstructionSetFeatures::IsRuntimeDetectionSupported());
+  EXPECT_TRUE(runtime_detected_features->Equals(hwcap_features.get()));
+  EXPECT_TRUE(runtime_detected_features->HasAtLeast(cpp_defined_features.get()));
+}
+
+TEST(Arm64InstructionSetFeaturesTest, AddFeaturesFromStringRuntime) {
+  std::unique_ptr<const InstructionSetFeatures> features(
+      InstructionSetFeatures::FromBitmap(InstructionSet::kArm64, 0x0));
+  std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+      InstructionSetFeatures::FromHwcap());
+
+  std::string error_msg;
+  features = features->AddFeaturesFromString("runtime", &error_msg);
+
+  EXPECT_NE(features, nullptr);
+  EXPECT_TRUE(error_msg.empty());
+
+  if (kRuntimeISA == InstructionSet::kArm64) {
+    EXPECT_TRUE(features->Equals(hwcap_features.get()));
+    EXPECT_EQ(features->GetFeatureString(), hwcap_features->GetFeatureString());
+  }
+
+  std::unique_ptr<const InstructionSetFeatures> a53_features(
+      Arm64InstructionSetFeatures::FromVariant("cortex-a53", &error_msg));
+  features = a53_features->AddFeaturesFromString("runtime", &error_msg);
+  EXPECT_NE(features, nullptr);
+  EXPECT_TRUE(error_msg.empty()) << error_msg;
+  const Arm64InstructionSetFeatures *arm64_features = features->AsArm64InstructionSetFeatures();
+  EXPECT_TRUE(arm64_features->NeedFixCortexA53_835769());
+  EXPECT_TRUE(arm64_features->NeedFixCortexA53_843419());
 }
 
 }  // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 96ceecf..9f3377e 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -289,36 +289,33 @@
 #endif
 
     // Save FP registers.
-    // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned.
-    str d0,       [sp, #8]
-    stp d1, d2,   [sp, #16]
-    stp d3, d4,   [sp, #32]
-    stp d5, d6,   [sp, #48]
-    stp d7, d8,   [sp, #64]
-    stp d9, d10,  [sp, #80]
-    stp d11, d12, [sp, #96]
-    stp d13, d14, [sp, #112]
-    stp d15, d16, [sp, #128]
-    stp d17, d18, [sp, #144]
-    stp d19, d20, [sp, #160]
-    stp d21, d22, [sp, #176]
-    stp d23, d24, [sp, #192]
-    stp d25, d26, [sp, #208]
-    stp d27, d28, [sp, #224]
-    stp d29, d30, [sp, #240]
-    str d31,      [sp, #256]
+    stp d0, d1,   [sp, #16]
+    stp d2, d3,   [sp, #32]
+    stp d4, d5,   [sp, #48]
+    stp d6, d7,   [sp, #64]
+    stp d8, d9,   [sp, #80]
+    stp d10, d11, [sp, #96]
+    stp d12, d13, [sp, #112]
+    stp d14, d15, [sp, #128]
+    stp d16, d17, [sp, #144]
+    stp d18, d19, [sp, #160]
+    stp d20, d21, [sp, #176]
+    stp d22, d23, [sp, #192]
+    stp d24, d25, [sp, #208]
+    stp d26, d27, [sp, #224]
+    stp d28, d29, [sp, #240]
+    stp d30, d31, [sp, #256]
 
     // Save core registers.
-    SAVE_REG            x0, 264
-    SAVE_TWO_REGS  x1,  x2, 272
-    SAVE_TWO_REGS  x3,  x4, 288
-    SAVE_TWO_REGS  x5,  x6, 304
-    SAVE_TWO_REGS  x7,  x8, 320
-    SAVE_TWO_REGS  x9, x10, 336
-    SAVE_TWO_REGS x11, x12, 352
-    SAVE_TWO_REGS x13, x14, 368
-    SAVE_TWO_REGS x15, x16, 384
-    SAVE_TWO_REGS x17, x18, 400
+    SAVE_TWO_REGS  x0,  x1, 272
+    SAVE_TWO_REGS  x2,  x3, 288
+    SAVE_TWO_REGS  x4,  x5, 304
+    SAVE_TWO_REGS  x6,  x7, 320
+    SAVE_TWO_REGS  x8,  x9, 336
+    SAVE_TWO_REGS x10, x11, 352
+    SAVE_TWO_REGS x12, x13, 368
+    SAVE_TWO_REGS x14, x15, 384
+    SAVE_TWO_REGS x16, x17, 400 // Do not save the platform register.
     SAVE_TWO_REGS x19, x20, 416
     SAVE_TWO_REGS x21, x22, 432
     SAVE_TWO_REGS x23, x24, 448
@@ -351,35 +348,33 @@
 
 .macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
     // Restore FP registers.
-    // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
-    ldr d0,       [sp, #8]
-    ldp d1, d2,   [sp, #16]
-    ldp d3, d4,   [sp, #32]
-    ldp d5, d6,   [sp, #48]
-    ldp d7, d8,   [sp, #64]
-    ldp d9, d10,  [sp, #80]
-    ldp d11, d12, [sp, #96]
-    ldp d13, d14, [sp, #112]
-    ldp d15, d16, [sp, #128]
-    ldp d17, d18, [sp, #144]
-    ldp d19, d20, [sp, #160]
-    ldp d21, d22, [sp, #176]
-    ldp d23, d24, [sp, #192]
-    ldp d25, d26, [sp, #208]
-    ldp d27, d28, [sp, #224]
-    ldp d29, d30, [sp, #240]
-    ldr d31,      [sp, #256]
+    ldp d0, d1,   [sp, #16]
+    ldp d2, d3,   [sp, #32]
+    ldp d4, d5,   [sp, #48]
+    ldp d6, d7,   [sp, #64]
+    ldp d8, d9,   [sp, #80]
+    ldp d10, d11, [sp, #96]
+    ldp d12, d13, [sp, #112]
+    ldp d14, d15, [sp, #128]
+    ldp d16, d17, [sp, #144]
+    ldp d18, d19, [sp, #160]
+    ldp d20, d21, [sp, #176]
+    ldp d22, d23, [sp, #192]
+    ldp d24, d25, [sp, #208]
+    ldp d26, d27, [sp, #224]
+    ldp d28, d29, [sp, #240]
+    ldp d30, d31, [sp, #256]
 
     // Restore core registers, except x0.
-    RESTORE_TWO_REGS  x1,  x2, 272
-    RESTORE_TWO_REGS  x3,  x4, 288
-    RESTORE_TWO_REGS  x5,  x6, 304
-    RESTORE_TWO_REGS  x7,  x8, 320
-    RESTORE_TWO_REGS  x9, x10, 336
-    RESTORE_TWO_REGS x11, x12, 352
-    RESTORE_TWO_REGS x13, x14, 368
-    RESTORE_TWO_REGS x15, x16, 384
-    RESTORE_TWO_REGS x17, x18, 400
+    RESTORE_REG            x1, 280
+    RESTORE_TWO_REGS  x2,  x3, 288
+    RESTORE_TWO_REGS  x4,  x5, 304
+    RESTORE_TWO_REGS  x6,  x7, 320
+    RESTORE_TWO_REGS  x8,  x9, 336
+    RESTORE_TWO_REGS x10, x11, 352
+    RESTORE_TWO_REGS x12, x13, 368
+    RESTORE_TWO_REGS x14, x15, 384
+    RESTORE_TWO_REGS x16, x17, 400 // Do not restore the platform register.
     RESTORE_TWO_REGS x19, x20, 416
     RESTORE_TWO_REGS x21, x22, 432
     RESTORE_TWO_REGS x23, x24, 448
@@ -391,7 +386,7 @@
 .endm
 
 .macro RESTORE_SAVE_EVERYTHING_FRAME
-    RESTORE_REG  x0, 264
+    RESTORE_REG  x0, 272
     RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
 .endm
 
@@ -1116,7 +1111,8 @@
     ldp x12, x13, [x0, #96]
     ldp x14, x15, [x0, #112]
     // Do not load IP0 (x16) and IP1 (x17), these shall be clobbered below.
-    ldp x18, x19, [x0, #144]      // X18 and xSELF.
+    // Don't load the platform register (x18) either.
+    ldr      x19, [x0, #152]      // xSELF.
     ldp x20, x21, [x0, #160]      // For Baker RB, wMR (w20) is reloaded below.
     ldp x22, x23, [x0, #176]
     ldp x24, x25, [x0, #192]
@@ -2293,8 +2289,8 @@
     mov   xLR, #0             // Clobber LR for later checks.
     SETUP_SAVE_EVERYTHING_FRAME
 
-    add   x3, sp, #8          // Pass floating-point result pointer, in kSaveEverything frame.
-    add   x2, sp, #264        // Pass integer result pointer, in kSaveEverything frame.
+    add   x3, sp, #16         // Pass floating-point result pointer, in kSaveEverything frame.
+    add   x2, sp, #272        // Pass integer result pointer, in kSaveEverything frame.
     mov   x1, sp              // Pass SP.
     mov   x0, xSELF           // Pass Thread.
     bl   artInstrumentationMethodExitFromCode    // (Thread*, SP, gpr_res*, fpr_res*)
@@ -2496,7 +2492,8 @@
 .Lslow_rb_\name:
     /*
      * Allocate 44 stack slots * 8 = 352 bytes:
-     * - 20 slots for core registers X0-15, X17-X19, LR
+     * - 19 slots for core registers X0-15, X17, X19, LR
+     * - 1 slot padding
      * - 24 slots for floating-point registers D0-D7 and D16-D31
      */
     // We must not clobber IP1 since code emitted for HLoadClass and HLoadString
@@ -2510,8 +2507,8 @@
     SAVE_TWO_REGS x10, x11, 80
     SAVE_TWO_REGS x12, x13, 96
     SAVE_TWO_REGS x14, x15, 112
-    SAVE_TWO_REGS x17, x18, 128  // Skip x16, i.e. IP0.
-    SAVE_TWO_REGS x19, xLR, 144  // Save also return address.
+    SAVE_TWO_REGS x17, x19, 128  // Skip x16, i.e. IP0, and x18, the platform register.
+    SAVE_REG      xLR,      144  // Save also return address.
     // Save all potentially live caller-save floating-point registers.
     stp   d0, d1,   [sp, #160]
     stp   d2, d3,   [sp, #176]
@@ -2544,8 +2541,8 @@
     POP_REGS_NE x10, x11, 80,  \xreg
     POP_REGS_NE x12, x13, 96,  \xreg
     POP_REGS_NE x14, x15, 112, \xreg
-    POP_REGS_NE x17, x18, 128, \xreg
-    POP_REGS_NE x19, xLR, 144, \xreg  // Restore also return address.
+    POP_REGS_NE x17, x19, 128, \xreg
+    POP_REG_NE  xLR,      144, \xreg  // Restore also return address.
     // Restore floating-point registers.
     ldp   d0, d1,   [sp, #160]
     ldp   d2, d3,   [sp, #176]
@@ -2588,7 +2585,7 @@
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15
 // READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 ip0 is blocked
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18
+// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18 x18 is blocked
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21
@@ -2629,7 +2626,7 @@
     SELECT_X_OR_W_FOR_MACRO \macro_for_register, x15, w15, \xreg
     \macro_for_reserved_register  // IP0 is reserved
     \macro_for_reserved_register  // IP1 is reserved
-    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x18, w18, \xreg
+    \macro_for_reserved_register  // x18 is reserved
     SELECT_X_OR_W_FOR_MACRO \macro_for_register, x19, w19, \xreg
     SELECT_X_OR_W_FOR_MACRO \macro_for_register, x20, w20, \xreg
     SELECT_X_OR_W_FOR_MACRO \macro_for_register, x21, w21, \xreg
@@ -2673,13 +2670,12 @@
 
 .macro READ_BARRIER_MARK_INTROSPECTION_SLOW_PATH ldr_offset
     /*
-     * Allocate 44 stack slots * 8 = 352 bytes:
-     * - 19 slots for core registers X0-15, X18-X19, LR
-     * - 1 slot padding
+     * Allocate 42 stack slots * 8 = 336 bytes:
+     * - 18 slots for core registers X0-15, X19, LR
      * - 24 slots for floating-point registers D0-D7 and D16-D31
      */
     // Save all potentially live caller-save core registers.
-    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 352
+    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 336
     SAVE_TWO_REGS  x2,  x3, 16
     SAVE_TWO_REGS  x4,  x5, 32
     SAVE_TWO_REGS  x6,  x7, 48
@@ -2687,21 +2683,21 @@
     SAVE_TWO_REGS x10, x11, 80
     SAVE_TWO_REGS x12, x13, 96
     SAVE_TWO_REGS x14, x15, 112
-    SAVE_TWO_REGS x18, x19, 128       // Skip x16, x17, i.e. IP0, IP1.
-    SAVE_REG      xLR,      144       // Save return address, skip padding at 152.
+    // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
+    SAVE_TWO_REGS x19, xLR, 128       // Save return address.
     // Save all potentially live caller-save floating-point registers.
-    stp   d0, d1,   [sp, #160]
-    stp   d2, d3,   [sp, #176]
-    stp   d4, d5,   [sp, #192]
-    stp   d6, d7,   [sp, #208]
-    stp   d16, d17, [sp, #224]
-    stp   d18, d19, [sp, #240]
-    stp   d20, d21, [sp, #256]
-    stp   d22, d23, [sp, #272]
-    stp   d24, d25, [sp, #288]
-    stp   d26, d27, [sp, #304]
-    stp   d28, d29, [sp, #320]
-    stp   d30, d31, [sp, #336]
+    stp   d0, d1,   [sp, #144]
+    stp   d2, d3,   [sp, #160]
+    stp   d4, d5,   [sp, #176]
+    stp   d6, d7,   [sp, #192]
+    stp   d16, d17, [sp, #208]
+    stp   d18, d19, [sp, #224]
+    stp   d20, d21, [sp, #240]
+    stp   d22, d23, [sp, #256]
+    stp   d24, d25, [sp, #272]
+    stp   d26, d27, [sp, #288]
+    stp   d28, d29, [sp, #304]
+    stp   d30, d31, [sp, #320]
 
     mov   x0, xIP0
     bl    artReadBarrierMark          // artReadBarrierMark(obj)
@@ -2716,26 +2712,26 @@
     RESTORE_TWO_REGS x10, x11, 80
     RESTORE_TWO_REGS x12, x13, 96
     RESTORE_TWO_REGS x14, x15, 112
-    RESTORE_TWO_REGS x18, x19, 128    // Skip x16, x17, i.e. IP0, IP1.
-    RESTORE_REG      xLR,      144    // Restore return address.
+    // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
+    RESTORE_TWO_REGS x19, xLR, 128    // Restore return address.
     // Restore caller-save floating-point registers.
-    ldp   d0, d1,   [sp, #160]
-    ldp   d2, d3,   [sp, #176]
-    ldp   d4, d5,   [sp, #192]
-    ldp   d6, d7,   [sp, #208]
-    ldp   d16, d17, [sp, #224]
-    ldp   d18, d19, [sp, #240]
-    ldp   d20, d21, [sp, #256]
-    ldp   d22, d23, [sp, #272]
-    ldp   d24, d25, [sp, #288]
-    ldp   d26, d27, [sp, #304]
-    ldp   d28, d29, [sp, #320]
-    ldp   d30, d31, [sp, #336]
+    ldp   d0, d1,   [sp, #144]
+    ldp   d2, d3,   [sp, #160]
+    ldp   d4, d5,   [sp, #176]
+    ldp   d6, d7,   [sp, #192]
+    ldp   d16, d17, [sp, #208]
+    ldp   d18, d19, [sp, #224]
+    ldp   d20, d21, [sp, #240]
+    ldp   d22, d23, [sp, #256]
+    ldp   d24, d25, [sp, #272]
+    ldp   d26, d27, [sp, #288]
+    ldp   d28, d29, [sp, #304]
+    ldp   d30, d31, [sp, #320]
 
     ldr   x0, [lr, #\ldr_offset]      // Load the instruction.
     adr   xIP1, .Lmark_introspection_return_switch
     bfi   xIP1, x0, #3, #5            // Calculate switch case address.
-    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 352
+    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 336
     br    xIP1
 .endm
 
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index d067f66..5980b03 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -21,7 +21,6 @@
 #include <stdint.h>
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 0c45bc9..c5c2d31 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -14,8 +14,11 @@
  * limitations under the License.
  */
 
 #include "instruction_set_features.h"
 
+#include <algorithm>
+#include <ostream>
+
 #include "android-base/strings.h"
 
 #include "base/casts.h"
@@ -110,6 +115,16 @@
   UNREACHABLE();
 }
 
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromRuntimeDetection() {
+  switch (kRuntimeISA) {
+#ifdef ART_TARGET_ANDROID
+    case InstructionSet::kArm64:
+      return Arm64InstructionSetFeatures::FromHwcap();
+#endif
+    default:
+      return nullptr;
+  }
+}
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
   switch (kRuntimeISA) {
@@ -181,44 +196,57 @@
 }
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
-    const std::string& feature_list, std::string* error_msg) const {
-  if (feature_list.empty()) {
-    *error_msg = "No instruction set features specified";
-    return std::unique_ptr<const InstructionSetFeatures>();
-  }
+    const std::string& feature_list, /* out */ std::string* error_msg) const {
   std::vector<std::string> features;
   Split(feature_list, ',', &features);
-  bool use_default = false;  // Have we seen the 'default' feature?
-  bool first = false;  // Is this first feature?
-  for (auto it = features.begin(); it != features.end();) {
-    if (use_default) {
-      *error_msg = "Unexpected instruction set features after 'default'";
-      return std::unique_ptr<const InstructionSetFeatures>();
-    }
-    std::string feature = android::base::Trim(*it);
-    bool erase = false;
-    if (feature == "default") {
-      if (!first) {
-        use_default = true;
-        erase = true;
-      } else {
-        *error_msg = "Unexpected instruction set features before 'default'";
-        return std::unique_ptr<const InstructionSetFeatures>();
-      }
-    }
-    if (!erase) {
-      ++it;
-    } else {
-      it = features.erase(it);
-    }
-    first = true;
+  std::transform(std::begin(features), std::end(features), std::begin(features),
+                 [](const std::string &s) { return android::base::Trim(s); });
+  auto empty_strings_begin = std::copy_if(std::begin(features), std::end(features),
+                                          std::begin(features),
+                                          [](const std::string& s) { return !s.empty(); });
+  features.erase(empty_strings_begin, std::end(features));
+  if (features.empty()) {
+    *error_msg = "No instruction set features specified";
+    return nullptr;
   }
-  // Expectation: "default" is standalone, no other flags. But an empty features vector after
-  // processing can also come along if the handled flags are the only ones in the list. So
-  // logically, we check "default -> features.empty."
-  DCHECK(!use_default || features.empty());
 
-  return AddFeaturesFromSplitString(features, error_msg);
+  bool use_default = false;
+  bool use_runtime_detection = false;
+  for (const std::string& feature : features) {
+    if (feature == "default") {
+      if (features.size() > 1) {
+        *error_msg = "Specific instruction set feature(s) cannot be used when 'default' is used.";
+        return nullptr;
+      }
+      use_default = true;
+      features.pop_back();
+      break;
+    } else if (feature == "runtime") {
+      if (features.size() > 1) {
+        *error_msg = "Specific instruction set feature(s) cannot be used when 'runtime' is used.";
+        return nullptr;
+      }
+      use_runtime_detection = true;
+      features.pop_back();
+      break;
+    }
+  }
+  // Expectation: "default" and "runtime" are standalone, no other feature names.
+  // But an empty features vector after processing can also come along if the
+  // handled feature names are the only ones in the list. So
+  // logically, we check "default or runtime => features.empty."
+  DCHECK((!use_default && !use_runtime_detection) || features.empty());
+
+  std::unique_ptr<const InstructionSetFeatures> runtime_detected_features;
+  if (use_runtime_detection) {
+    runtime_detected_features = FromRuntimeDetection();
+  }
+
+  if (runtime_detected_features != nullptr) {
+    return AddRuntimeDetectedFeatures(runtime_detected_features.get());
+  } else {
+    return AddFeaturesFromSplitString(features, error_msg);
+  }
 }
 
 const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
@@ -259,6 +287,12 @@
   return std::find(begin, end, variant) != end;
 }
 
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddRuntimeDetectedFeatures(
+    const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const {
+  UNIMPLEMENTED(FATAL) << kRuntimeISA;
+  UNREACHABLE();
+}
+
 std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
   os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
   return os;
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index c31c927..9222a7b 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
 #define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
 
+#include <iosfwd>
 #include <memory>
-#include <ostream>
 #include <vector>
 
 #include "arch/instruction_set.h"
@@ -48,6 +48,20 @@
   // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
   static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
 
+  // Check if run-time detection of instruction set features is supported.
+  //
+  // Return: true - if run-time detection is supported on a target device.
+  //         false - otherwise
+  static bool IsRuntimeDetectionSupported() {
+    return FromRuntimeDetection() != nullptr;
+  }
+
+  // Use run-time detection to get instruction set features.
+  //
+  // Return: a set of detected features or nullptr if runtime detection is not
+  //         supported on a target.
+  static std::unique_ptr<const InstructionSetFeatures> FromRuntimeDetection();
+
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
   static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
 
@@ -126,6 +140,10 @@
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
                                  std::string* error_msg) const = 0;
 
+  // Add run-time detected architecture specific features in sub-classes.
+  virtual std::unique_ptr<const InstructionSetFeatures>
+      AddRuntimeDetectedFeatures(const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const;
+
  private:
   DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
 };
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index 3a39a2a..d9b2e3f 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <array>
+
 #include "instruction_set_features.h"
 
 #include <gtest/gtest.h>
@@ -161,4 +163,145 @@
       << "\nFeatures from build: " << *instruction_set_features.get();
 }
 
+TEST(InstructionSetFeaturesTest, FeaturesFromRuntimeDetection) {
+  if (!InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+    EXPECT_EQ(InstructionSetFeatures::FromRuntimeDetection(), nullptr);
+  }
+}
+
+// The instruction set feature string must not contain 'default' together with
+// other feature names.
+//
+// Test that InstructionSetFeatures::AddFeaturesFromString returns nullptr and
+// an error is reported when the value 'default' is specified together
+// with other feature names in an instruction set feature string.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringWithDefaultAndOtherNames) {
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  std::vector<std::string> invalid_feature_strings = {
+    "a,default",
+    "default,a",
+    "a,default,b",
+    "a,b,default",
+    "default,a,b,c",
+    "a,b,default,c,d",
+    "a, default ",
+    " default , a",
+    "a, default , b",
+    "default,runtime"
+  };
+
+  for (const std::string& invalid_feature_string : invalid_feature_strings) {
+    std::string error_msg;
+    EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+              nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+    EXPECT_EQ(error_msg,
+              "Specific instruction set feature(s) cannot be used when 'default' is used.");
+  }
+}
+
+// The instruction set feature string must not contain 'runtime' together with
+// other feature names.
+//
+// Test that InstructionSetFeatures::AddFeaturesFromString returns nullptr and
+// an error is reported when the value 'runtime' is specified together
+// with other feature names in an instruction set feature string.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringWithRuntimeAndOtherNames) {
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  std::vector<std::string> invalid_feature_strings = {
+    "a,runtime",
+    "runtime,a",
+    "a,runtime,b",
+    "a,b,runtime",
+    "runtime,a,b,c",
+    "a,b,runtime,c,d",
+    "a, runtime ",
+    " runtime , a",
+    "a, runtime , b",
+    "runtime,default"
+  };
+
+  for (const std::string& invalid_feature_string : invalid_feature_strings) {
+    std::string error_msg;
+    EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+              nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+    EXPECT_EQ(error_msg,
+              "Specific instruction set feature(s) cannot be used when 'runtime' is used.");
+  }
+}
+
+// Spaces and multiple commas are ignored in an instruction set feature string.
+//
+// Test that a use of spaces and multiple commas with 'default' and 'runtime'
+// does not cause errors.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromValidStringContainingDefaultOrRuntime) {
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  std::vector<std::string> valid_feature_strings = {
+    "default",
+    ",,,default",
+    "default,,,,",
+    ",,,default,,,,",
+    "default, , , ",
+    " , , ,default",
+    " , , ,default, , , ",
+    " default , , , ",
+    ",,,runtime",
+    "runtime,,,,",
+    ",,,runtime,,,,",
+    "runtime, , , ",
+    " , , ,runtime",
+    " , , ,runtime, , , ",
+    " runtime , , , "
+  };
+  for (const std::string& valid_feature_string : valid_feature_strings) {
+    std::string error_msg;
+    EXPECT_NE(cpp_defined_features->AddFeaturesFromString(valid_feature_string, &error_msg),
+              nullptr) << " Valid feature string: '" << valid_feature_string << "'";
+    EXPECT_TRUE(error_msg.empty()) << error_msg;
+  }
+}
+
+// Spaces and multiple commas are ignored in an instruction set feature string.
+//
+// Test that a use of spaces and multiple commas without any feature names
+// causes errors.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromInvalidStringWithoutFeatureNames) {
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  std::vector<std::string> invalid_feature_strings = {
+    " ",
+    "       ",
+    ",",
+    ",,",
+    " , , ,,,,,,",
+    "\t",
+    "  \t     ",
+    ",",
+    ",,",
+    " , , ,,,,,,"
+  };
+  for (const std::string& invalid_feature_string : invalid_feature_strings) {
+    std::string error_msg;
+    EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+              nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+    EXPECT_EQ(error_msg, "No instruction set features specified");
+  }
+}
+
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringRuntime) {
+  std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+      InstructionSetFeatures::FromCppDefines());
+  std::string error_msg;
+
+  const std::unique_ptr<const InstructionSetFeatures> features =
+      cpp_defined_features->AddFeaturesFromString("runtime", &error_msg);
+  EXPECT_NE(features, nullptr);
+  EXPECT_TRUE(error_msg.empty()) << error_msg;
+  if (!InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+    EXPECT_TRUE(features->Equals(cpp_defined_features.get()));
+  }
+}
+
 }  // namespace art
diff --git a/runtime/arch/mips/callee_save_frame_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
index 6e88d08..84ce209 100644
--- a/runtime/arch/mips/callee_save_frame_mips.h
+++ b/runtime/arch/mips/callee_save_frame_mips.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace mips {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 05172db..cbf5681 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -184,7 +184,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints, /*is_active*/ false);
+  ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
 
   // Cast
   qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
@@ -445,7 +445,7 @@
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
   static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 7c8ac28..0354f0c 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -17,14 +17,15 @@
 #include <sys/ucontext.h>
 #include "fault_handler.h"
 
+#include "arch/instruction_set.h"
 #include "arch/mips/callee_save_frame_mips.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
-#include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
 #include "registers_mips.h"
+#include "runtime_globals.h"
 #include "thread-current-inl.h"
 
 extern "C" void art_quick_throw_stack_overflow();
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 952ed25..99ce536 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -214,8 +214,9 @@
   bool mips_isa_gte2 = mips_isa_gte2_;
   bool r6 = r6_;
   bool msa = msa_;
-  for (auto i = features.begin(); i != features.end(); i++) {
-    std::string feature = android::base::Trim(*i);
+  for (const std::string& feature : features) {
+    DCHECK_EQ(android::base::Trim(feature), feature)
+        << "Feature name is not trimmed: '" << feature << "'";
     if (feature == "fpu32") {
       fpu_32bit = true;
     } else if (feature == "-fpu32") {
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
index 34f2f96..4900e41 100644
--- a/runtime/arch/mips/registers_mips.h
+++ b/runtime/arch/mips/registers_mips.h
@@ -19,9 +19,6 @@
 
 #include <iosfwd>
 
-#include <android-base/logging.h>
-
-#include "base/globals.h"
 #include "base/macros.h"
 
 namespace art {
diff --git a/runtime/arch/mips64/callee_save_frame_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
index 59529a0..64d6bec 100644
--- a/runtime/arch/mips64/callee_save_frame_mips64.h
+++ b/runtime/arch/mips64/callee_save_frame_mips64.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips64.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace mips64 {
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 2acfe14..741d41a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -191,7 +191,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 85f3528..6255235 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -18,14 +18,15 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/instruction_set.h"
 #include "arch/mips64/callee_save_frame_mips64.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
-#include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
 #include "registers_mips64.h"
+#include "runtime_globals.h"
 #include "thread-current-inl.h"
 
 extern "C" void art_quick_throw_stack_overflow();
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index ea9f84b..2031433 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -114,8 +114,9 @@
 Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
     const std::vector<std::string>& features, std::string* error_msg) const {
   bool msa = msa_;
-  for (auto i = features.begin(); i != features.end(); i++) {
-    std::string feature = android::base::Trim(*i);
+  for (const std::string& feature : features) {
+    DCHECK_EQ(android::base::Trim(feature), feature)
+        << "Feature name is not trimmed: '" << feature << "'";
     if (feature == "msa") {
       msa = true;
     } else if (feature == "-msa") {
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index a3fa2ac4..1c22c07 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -19,9 +19,6 @@
 
 #include <iosfwd>
 
-#include <android-base/logging.h>
-
-#include "base/globals.h"
 #include "base/macros.h"
 
 namespace art {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index e8df90e..c9774a7 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -27,8 +27,9 @@
 #include "imt_conflict_table.h"
 #include "jni/jni_internal.h"
 #include "linear_alloc.h"
-#include "mirror/class-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/string-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -308,12 +309,13 @@
           // Use the result from r0
         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
           [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
+          // X18 is a reserved register, cannot be clobbered.
           // Leave one register unclobbered, which is needed for compiling with
           // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
           // which means we should unclobber one of the callee-saved registers that are unused.
           // Here we use x20.
           // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
-        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
+        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19",
           "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
@@ -1899,7 +1901,7 @@
   LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
   ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
   ImtConflictTable* empty_conflict_table =
-      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
+      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
   void* data = linear_alloc->Alloc(
       self,
       ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
diff --git a/runtime/arch/x86/callee_save_frame_x86.h b/runtime/arch/x86/callee_save_frame_x86.h
index f336f43..2edcade 100644
--- a/runtime/arch/x86/callee_save_frame_x86.h
+++ b/runtime/arch/x86/callee_save_frame_x86.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_x86.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace x86 {
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index ffb0c94..3db4ede 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -98,7 +98,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (ESP) to pass arguments.
   // x86 has only 8 core registers.
   qpoints->pReadBarrierMarkReg08 = nullptr;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 8b24334..26312fb 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -18,13 +18,14 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
 #include "base/safe_copy.h"
+#include "runtime_globals.h"
 #include "thread-current-inl.h"
 
 #if defined(__APPLE__)
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 9846251..0c3d26e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -35,27 +35,39 @@
     "atom",
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_ssse3[] = {
     "atom",
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_sse4_1[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_sse4_2[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_popcnt[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
+};
+static constexpr const char* x86_variants_with_avx[] = {
+    "kabylake",
+};
+
+static constexpr const char* x86_variants_with_avx2[] = {
+    "kabylake",
 };
 
 X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
@@ -93,9 +105,12 @@
   bool has_SSE4_2 = FindVariantInArray(x86_variants_with_sse4_2,
                                        arraysize(x86_variants_with_sse4_2),
                                        variant);
-  bool has_AVX = false;
-  bool has_AVX2 = false;
-
+  bool has_AVX = FindVariantInArray(x86_variants_with_avx,
+                                    arraysize(x86_variants_with_avx),
+                                    variant);
+  bool has_AVX2 = FindVariantInArray(x86_variants_with_avx2,
+                                     arraysize(x86_variants_with_avx2),
+                                     variant);
   bool has_POPCNT = FindVariantInArray(x86_variants_with_popcnt,
                                        arraysize(x86_variants_with_popcnt),
                                        variant);
@@ -296,8 +311,9 @@
   bool has_AVX = has_AVX_;
   bool has_AVX2 = has_AVX2_;
   bool has_POPCNT = has_POPCNT_;
-  for (auto i = features.begin(); i != features.end(); i++) {
-    std::string feature = android::base::Trim(*i);
+  for (const std::string& feature : features) {
+    DCHECK_EQ(android::base::Trim(feature), feature)
+        << "Feature name is not trimmed: '" << feature << "'";
     if (feature == "ssse3") {
       has_SSSE3 = true;
     } else if (feature == "-ssse3") {
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 6bd6263..34d908b 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -67,6 +67,8 @@
 
   bool HasPopCnt() const { return has_POPCNT_; }
 
+  bool HasAVX2() const { return has_AVX2_; }
+
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index 33eac0f..cdf15af 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -143,4 +143,40 @@
   EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
 }
 
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromKabylakeVariant) {
+  // Build features for a 32-bit kabylake x86 processor.
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> x86_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "kabylake", &error_msg));
+  ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
+  EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+  EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+               x86_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_features->AsBitmap(), 63U);
+
+  // Build features for a 32-bit x86 default processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
+  ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
+  EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+  EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
+               x86_default_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
+
+  // Build features for a 64-bit x86-64 kabylake processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "kabylake", &error_msg));
+  ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
+  EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+  EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+               x86_64_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_64_features->AsBitmap(), 63U);
+
+  EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+  EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+  EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
 }  // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b0bed56..306c4eb 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1804,6 +1804,7 @@
     PUSH ESI
     PUSH EDX
     movl 16(%esp), %edi         // Load referrer.
+    movd %xmm7, %esi            // Get target method index stored in xmm7, remember it in ESI.
     // If the method is obsolete, just go through the dex cache miss slow path.
     // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
     testl LITERAL(ACC_OBSOLETE_METHOD), ART_METHOD_ACCESS_FLAGS_OFFSET(%edi)
@@ -1814,8 +1815,7 @@
     movl MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET(%edi), %edi  // Load the resolved methods.
     pushl ART_METHOD_JNI_OFFSET_32(%eax)  // Push ImtConflictTable.
     CFI_ADJUST_CFA_OFFSET(4)
-    movd %xmm7, %eax            // Get target method index stored in xmm7.
-    movl %eax, %esi             // Remember method index in ESI.
+    movl %esi, %eax             // Copy the method index from ESI.
     andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax  // Calculate DexCache method slot index.
     leal 0(%edi, %eax, 2 * __SIZEOF_POINTER__), %edi  // Load DexCache method slot address.
     mov %ecx, %edx              // Make EDX:EAX == ECX:EBX so that LOCK CMPXCHG8B makes no changes.
diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h
index d3b959f..ff6c18f 100644
--- a/runtime/arch/x86/registers_x86.h
+++ b/runtime/arch/x86/registers_x86.h
@@ -19,9 +19,6 @@
 
 #include <iosfwd>
 
-#include <android-base/logging.h>
-
-#include "base/globals.h"
 #include "base/macros.h"
 
 namespace art {
diff --git a/runtime/arch/x86_64/callee_save_frame_x86_64.h b/runtime/arch/x86_64/callee_save_frame_x86_64.h
index 228a902..d4f2da7 100644
--- a/runtime/arch/x86_64/callee_save_frame_x86_64.h
+++ b/runtime/arch/x86_64/callee_save_frame_x86_64.h
@@ -21,9 +21,9 @@
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_x86_64.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace x86_64 {
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6bae69c..db011ba 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -120,7 +120,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (RSP) to pass arguments.
   // x86-64 has only 16 core registers.
   qpoints->pReadBarrierMarkReg16 = nullptr;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index a8a648f..39bf6e8 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1654,7 +1654,7 @@
      * rdi is the conflict ArtMethod.
      * rax is a hidden argument that holds the target interface method's dex method index.
      *
-     * Note that this stub writes to r10 and rdi.
+     * Note that this stub writes to r10, r11, rax and rdi.
      */
 DEFINE_FUNCTION art_quick_imt_conflict_trampoline
 #if defined(__APPLE__)
@@ -1662,6 +1662,8 @@
     int3
 #else
     movq __SIZEOF_POINTER__(%rsp), %r10 // Load referrer.
+    mov %eax, %r11d             // Remember method index in R11.
+    PUSH rdx                    // Preserve RDX as we need to clobber it by LOCK CMPXCHG16B.
     // If the method is obsolete, just go through the dex cache miss slow path.
     // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
     testl LITERAL(ACC_OBSOLETE_METHOD), ART_METHOD_ACCESS_FLAGS_OFFSET(%r10)
@@ -1670,11 +1672,9 @@
     movl MIRROR_CLASS_DEX_CACHE_OFFSET(%r10), %r10d    // Load the DexCache (without read barrier).
     UNPOISON_HEAP_REF r10d
     movq MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET(%r10), %r10  // Load the resolved methods.
-    mov %eax, %r11d  // Remember method index in R11.
     andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax  // Calculate DexCache method slot index.
     shll LITERAL(1), %eax       // Multiply by 2 as entries have size 2 * __SIZEOF_POINTER__.
     leaq 0(%r10, %rax, __SIZEOF_POINTER__), %r10 // Load DexCache method slot address.
-    PUSH rdx                    // Preserve RDX as we need to clobber it by LOCK CMPXCHG16B.
     mov %rcx, %rdx              // Make RDX:RAX == RCX:RBX so that LOCK CMPXCHG16B makes no changes.
     mov %rbx, %rax              // (The actual value does not matter.)
     lock cmpxchg16b (%r10)      // Relaxed atomic load RDX:RAX from the dex cache slot.
diff --git a/runtime/arch/x86_64/registers_x86_64.h b/runtime/arch/x86_64/registers_x86_64.h
index 66aea70..248c82b 100644
--- a/runtime/arch/x86_64/registers_x86_64.h
+++ b/runtime/arch/x86_64/registers_x86_64.h
@@ -19,9 +19,6 @@
 
 #include <iosfwd>
 
-#include <android-base/logging.h>
-
-#include "base/globals.h"
 #include "base/macros.h"
 
 namespace art {
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index c5fb7d5..6f976d1 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -29,7 +29,6 @@
 #include "jvalue.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
-#include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -291,7 +290,7 @@
     return field_index == 0 ? "[Ljava/lang/Class;" : "[[Ljava/lang/Class;";
   }
   const DexFile* dex_file = GetDexFile();
-  const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
+  const dex::FieldId& field_id = dex_file->GetFieldId(field_index);
   return dex_file->GetFieldTypeDescriptor(field_id);
 }
 
@@ -342,7 +341,7 @@
 inline ObjPtr<mirror::String> ArtField::ResolveNameString() {
   uint32_t dex_field_index = GetDexFieldIndex();
   CHECK_NE(dex_field_index, dex::kDexNoIndex);
-  const DexFile::FieldId& field_id = GetDexFile()->GetFieldId(dex_field_index);
+  const dex::FieldId& field_id = GetDexFile()->GetFieldId(dex_field_index);
   return Runtime::Current()->GetClassLinker()->ResolveString(field_id.name_idx_, this);
 }
 
@@ -399,6 +398,10 @@
   return FindFieldWithOffset<kExactOffset>(klass->GetSFields(), field_offset);
 }
 
+inline mirror::ClassLoader* ArtField::GetClassLoader() {
+  return GetDeclaringClass()->GetClassLoader();
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ART_FIELD_INL_H_
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 6cbd9e4..e20e7f3 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -47,7 +47,7 @@
 ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
   DCHECK(GetDeclaringClass()->IsProxyClass());
   ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupClass(
-      Thread::Current(), descriptor, /* class_loader */ nullptr);
+      Thread::Current(), descriptor, /* class_loader= */ nullptr);
   DCHECK(klass != nullptr);
   return klass;
 }
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 5afd000..43adae5 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -17,10 +17,7 @@
 #ifndef ART_RUNTIME_ART_FIELD_H_
 #define ART_RUNTIME_ART_FIELD_H_
 
-#include <jni.h>
-
 #include "dex/dex_file_types.h"
-#include "dex/hidden_api_access_flags.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
@@ -35,6 +32,7 @@
 
 namespace mirror {
 class Class;
+class ClassLoader;
 class DexCache;
 class Object;
 class String;
@@ -45,6 +43,8 @@
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+
   void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -180,10 +180,6 @@
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
-  HiddenApiAccessFlags::ApiList GetHiddenApiAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return HiddenApiAccessFlags::DecodeFromRuntime(GetAccessFlags());
-  }
-
   // Returns an instance field with this offset in the given class or null if not found.
   // If kExactOffset is true then we only find the matching offset, not the field containing the
   // offset.
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 9b69166..e28ffa2 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -21,7 +21,6 @@
 
 #include "art_field.h"
 #include "base/callee_save_type.h"
-#include "base/utils.h"
 #include "class_linker-inl.h"
 #include "common_throws.h"
 #include "dex/code_item_accessors-inl.h"
@@ -31,6 +30,7 @@
 #include "dex/invoke_type.h"
 #include "dex/primitive.h"
 #include "gc_root-inl.h"
+#include "imtable-inl.h"
 #include "intrinsics_enum.h"
 #include "jit/profiling_info.h"
 #include "mirror/class-inl.h"
@@ -43,7 +43,6 @@
 #include "quick/quick_method_frame_info.h"
 #include "read_barrier-inl.h"
 #include "runtime-inl.h"
-#include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -224,11 +223,11 @@
 
 inline ObjPtr<mirror::String> ArtMethod::ResolveNameString() {
   DCHECK(!IsProxyMethod());
-  const DexFile::MethodId& method_id = GetDexFile()->GetMethodId(GetDexMethodIndex());
+  const dex::MethodId& method_id = GetDexFile()->GetMethodId(GetDexMethodIndex());
   return Runtime::Current()->GetClassLinker()->ResolveString(method_id.name_idx_, this);
 }
 
-inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
+inline const dex::CodeItem* ArtMethod::GetCodeItem() {
   return GetDexFile()->GetCodeItem(GetCodeItemOffset());
 }
 
@@ -245,16 +244,16 @@
   return annotations::GetLineNumFromPC(GetDexFile(), this, dex_pc);
 }
 
-inline const DexFile::ProtoId& ArtMethod::GetPrototype() {
+inline const dex::ProtoId& ArtMethod::GetPrototype() {
   DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
   return dex_file->GetMethodPrototype(dex_file->GetMethodId(GetDexMethodIndex()));
 }
 
-inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() {
+inline const dex::TypeList* ArtMethod::GetParameterTypeList() {
   DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
-  const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(
+  const dex::ProtoId& proto = dex_file->GetMethodPrototype(
       dex_file->GetMethodId(GetDexMethodIndex()));
   return dex_file->GetProtoParameters(proto);
 }
@@ -273,7 +272,7 @@
   }
 }
 
-inline const DexFile::ClassDef& ArtMethod::GetClassDef() {
+inline const dex::ClassDef& ArtMethod::GetClassDef() {
   DCHECK(!IsProxyMethod());
   return GetDexFile()->GetClassDef(GetClassDefIndex());
 }
@@ -344,8 +343,8 @@
 inline dex::TypeIndex ArtMethod::GetReturnTypeIndex() {
   DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
-  const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
-  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+  const dex::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
+  const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
   return proto_id.return_type_idx_;
 }
 
@@ -367,158 +366,6 @@
   return (GetAccessFlags() & kAccSingleImplementation) != 0;
 }
 
-inline HiddenApiAccessFlags::ApiList ArtMethod::GetHiddenApiAccessFlags()
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (UNLIKELY(IsIntrinsic())) {
-    switch (static_cast<Intrinsics>(GetIntrinsic())) {
-      case Intrinsics::kSystemArrayCopyChar:
-      case Intrinsics::kStringGetCharsNoCheck:
-      case Intrinsics::kReferenceGetReferent:
-      case Intrinsics::kMemoryPeekByte:
-      case Intrinsics::kMemoryPokeByte:
-      case Intrinsics::kUnsafeCASInt:
-      case Intrinsics::kUnsafeCASLong:
-      case Intrinsics::kUnsafeCASObject:
-      case Intrinsics::kUnsafeGet:
-      case Intrinsics::kUnsafeGetAndAddInt:
-      case Intrinsics::kUnsafeGetAndAddLong:
-      case Intrinsics::kUnsafeGetAndSetInt:
-      case Intrinsics::kUnsafeGetAndSetLong:
-      case Intrinsics::kUnsafeGetAndSetObject:
-      case Intrinsics::kUnsafeGetLong:
-      case Intrinsics::kUnsafeGetLongVolatile:
-      case Intrinsics::kUnsafeGetObject:
-      case Intrinsics::kUnsafeGetObjectVolatile:
-      case Intrinsics::kUnsafeGetVolatile:
-      case Intrinsics::kUnsafePut:
-      case Intrinsics::kUnsafePutLong:
-      case Intrinsics::kUnsafePutLongOrdered:
-      case Intrinsics::kUnsafePutLongVolatile:
-      case Intrinsics::kUnsafePutObject:
-      case Intrinsics::kUnsafePutObjectOrdered:
-      case Intrinsics::kUnsafePutObjectVolatile:
-      case Intrinsics::kUnsafePutOrdered:
-      case Intrinsics::kUnsafePutVolatile:
-      case Intrinsics::kUnsafeLoadFence:
-      case Intrinsics::kUnsafeStoreFence:
-      case Intrinsics::kUnsafeFullFence:
-        // These intrinsics are on the light greylist and will fail a DCHECK in
-        // SetIntrinsic() if their flags change on the respective dex methods.
-        // Note that the DCHECK currently won't fail if the dex methods are
-        // whitelisted, e.g. in the core image (b/77733081). As a result, we
-        // might print warnings but we won't change the semantics.
-        return HiddenApiAccessFlags::kLightGreylist;
-      case Intrinsics::kStringNewStringFromBytes:
-      case Intrinsics::kStringNewStringFromChars:
-      case Intrinsics::kStringNewStringFromString:
-      case Intrinsics::kMemoryPeekIntNative:
-      case Intrinsics::kMemoryPeekLongNative:
-      case Intrinsics::kMemoryPeekShortNative:
-      case Intrinsics::kMemoryPokeIntNative:
-      case Intrinsics::kMemoryPokeLongNative:
-      case Intrinsics::kMemoryPokeShortNative:
-      case Intrinsics::kVarHandleFullFence:
-      case Intrinsics::kVarHandleAcquireFence:
-      case Intrinsics::kVarHandleReleaseFence:
-      case Intrinsics::kVarHandleLoadLoadFence:
-      case Intrinsics::kVarHandleStoreStoreFence:
-      case Intrinsics::kVarHandleCompareAndExchange:
-      case Intrinsics::kVarHandleCompareAndExchangeAcquire:
-      case Intrinsics::kVarHandleCompareAndExchangeRelease:
-      case Intrinsics::kVarHandleCompareAndSet:
-      case Intrinsics::kVarHandleGet:
-      case Intrinsics::kVarHandleGetAcquire:
-      case Intrinsics::kVarHandleGetAndAdd:
-      case Intrinsics::kVarHandleGetAndAddAcquire:
-      case Intrinsics::kVarHandleGetAndAddRelease:
-      case Intrinsics::kVarHandleGetAndBitwiseAnd:
-      case Intrinsics::kVarHandleGetAndBitwiseAndAcquire:
-      case Intrinsics::kVarHandleGetAndBitwiseAndRelease:
-      case Intrinsics::kVarHandleGetAndBitwiseOr:
-      case Intrinsics::kVarHandleGetAndBitwiseOrAcquire:
-      case Intrinsics::kVarHandleGetAndBitwiseOrRelease:
-      case Intrinsics::kVarHandleGetAndBitwiseXor:
-      case Intrinsics::kVarHandleGetAndBitwiseXorAcquire:
-      case Intrinsics::kVarHandleGetAndBitwiseXorRelease:
-      case Intrinsics::kVarHandleGetAndSet:
-      case Intrinsics::kVarHandleGetAndSetAcquire:
-      case Intrinsics::kVarHandleGetAndSetRelease:
-      case Intrinsics::kVarHandleGetOpaque:
-      case Intrinsics::kVarHandleGetVolatile:
-      case Intrinsics::kVarHandleSet:
-      case Intrinsics::kVarHandleSetOpaque:
-      case Intrinsics::kVarHandleSetRelease:
-      case Intrinsics::kVarHandleSetVolatile:
-      case Intrinsics::kVarHandleWeakCompareAndSet:
-      case Intrinsics::kVarHandleWeakCompareAndSetAcquire:
-      case Intrinsics::kVarHandleWeakCompareAndSetPlain:
-      case Intrinsics::kVarHandleWeakCompareAndSetRelease:
-        // These intrinsics are on the blacklist and will fail a DCHECK in
-        // SetIntrinsic() if their flags change on the respective dex methods.
-        // Note that the DCHECK currently won't fail if the dex methods are
-        // whitelisted, e.g. in the core image (b/77733081). Given that they are
-        // exclusively VarHandle intrinsics, they should not be used outside
-        // tests that do not enable hidden API checks.
-        return HiddenApiAccessFlags::kBlacklist;
-      default:
-        // Remaining intrinsics are public API. We DCHECK that in SetIntrinsic().
-        return HiddenApiAccessFlags::kWhitelist;
-    }
-  } else {
-    return HiddenApiAccessFlags::DecodeFromRuntime(GetAccessFlags());
-  }
-}
-
-inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
-  // Currently we only do intrinsics for static/final methods or methods of final
-  // classes. We don't set kHasSingleImplementation for those methods.
-  DCHECK(IsStatic() || IsFinal() || GetDeclaringClass()->IsFinal()) <<
-      "Potential conflict with kAccSingleImplementation";
-  static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
-  DCHECK_LE(intrinsic, kAccIntrinsicBits >> kAccFlagsShift);
-  uint32_t intrinsic_bits = intrinsic << kAccFlagsShift;
-  uint32_t new_value = (GetAccessFlags() & ~kAccIntrinsicBits) | kAccIntrinsic | intrinsic_bits;
-  if (kIsDebugBuild) {
-    uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
-    bool is_constructor = IsConstructor();
-    bool is_synchronized = IsSynchronized();
-    bool skip_access_checks = SkipAccessChecks();
-    bool is_fast_native = IsFastNative();
-    bool is_critical_native = IsCriticalNative();
-    bool is_copied = IsCopied();
-    bool is_miranda = IsMiranda();
-    bool is_default = IsDefault();
-    bool is_default_conflict = IsDefaultConflicting();
-    bool is_compilable = IsCompilable();
-    bool must_count_locks = MustCountLocks();
-    HiddenApiAccessFlags::ApiList hidden_api_flags = GetHiddenApiAccessFlags();
-    SetAccessFlags(new_value);
-    DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
-    DCHECK_EQ(is_constructor, IsConstructor());
-    DCHECK_EQ(is_synchronized, IsSynchronized());
-    DCHECK_EQ(skip_access_checks, SkipAccessChecks());
-    DCHECK_EQ(is_fast_native, IsFastNative());
-    DCHECK_EQ(is_critical_native, IsCriticalNative());
-    DCHECK_EQ(is_copied, IsCopied());
-    DCHECK_EQ(is_miranda, IsMiranda());
-    DCHECK_EQ(is_default, IsDefault());
-    DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
-    DCHECK_EQ(is_compilable, IsCompilable());
-    DCHECK_EQ(must_count_locks, MustCountLocks());
-    // Only DCHECK that we have preserved the hidden API access flags if the
-    // original method was not on the whitelist. This is because the core image
-    // does not have the access flags set (b/77733081). It is fine to hard-code
-    // these because (a) warnings on greylist do not change semantics, and
-    // (b) only VarHandle intrinsics are blacklisted at the moment and they
-    // should not be used outside tests with disabled API checks.
-    if (hidden_api_flags != HiddenApiAccessFlags::kWhitelist) {
-      DCHECK_EQ(hidden_api_flags, GetHiddenApiAccessFlags()) << PrettyMethod();
-    }
-  } else {
-    SetAccessFlags(new_value);
-  }
-}
-
 template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
 void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
   if (LIKELY(!declaring_class_.IsNull())) {
@@ -573,6 +420,31 @@
   return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
 }
 
+inline void ArtMethod::SetCounter(int16_t hotness_count) {
+  DCHECK(!IsAbstract()) << PrettyMethod();
+  hotness_count_ = hotness_count;
+}
+
+inline uint16_t ArtMethod::GetCounter() {
+  DCHECK(!IsAbstract()) << PrettyMethod();
+  return hotness_count_;
+}
+
+inline uint32_t ArtMethod::GetImtIndex() {
+  if (LIKELY(IsAbstract() && imt_index_ != 0)) {
+    uint16_t imt_index = ~imt_index_;
+    DCHECK_EQ(imt_index, ImTable::GetImtIndex(this)) << PrettyMethod();
+    return imt_index;
+  } else {
+    return ImTable::GetImtIndex(this);
+  }
+}
+
+inline void ArtMethod::CalculateAndSetImtIndex() {
+  DCHECK(IsAbstract()) << PrettyMethod();
+  imt_index_ = ~ImTable::GetImtIndex(this);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ART_METHOD_INL_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 68ccfee..c7e41be 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -22,7 +22,6 @@
 
 #include "arch/context.h"
 #include "art_method-inl.h"
-#include "base/stringpiece.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "debugger.h"
@@ -31,8 +30,10 @@
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_exception_helpers.h"
 #include "dex/dex_instruction.h"
+#include "dex/signature-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
+#include "hidden_api.h"
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
@@ -69,7 +70,7 @@
   } else {
     ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
     DCHECK(declaring_class->IsInterface());
-    ArtMethod* ret = declaring_class->FindInterfaceMethod(declaring_class->GetDexCache(),
+    ArtMethod* ret = declaring_class->FindInterfaceMethod(GetDexCache(),
                                                           GetDexMethodIndex(),
                                                           pointer_size);
     DCHECK(ret != nullptr);
@@ -78,10 +79,11 @@
 }
 
 ArtMethod* ArtMethod::GetNonObsoleteMethod() {
-  DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
   if (LIKELY(!IsObsolete())) {
     return this;
-  } else if (IsDirect()) {
+  }
+  DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+  if (IsDirect()) {
     return &GetDeclaringClass()->GetDirectMethodsSlice(kRuntimePointerSize)[GetMethodIndex()];
   } else {
     return GetDeclaringClass()->GetVTableEntry(GetMethodIndex(), kRuntimePointerSize);
@@ -131,7 +133,7 @@
   DCHECK(IsObsolete());
   const DexFile* dex_file = GetDexFile();
   const dex::TypeIndex declaring_class_type = dex_file->GetMethodId(GetDexMethodIndex()).class_idx_;
-  const DexFile::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type);
+  const dex::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type);
   CHECK(class_def != nullptr);
   return dex_file->GetIndexForClassDef(*class_def);
 }
@@ -163,12 +165,11 @@
   }
 }
 
-size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
-  CHECK_LE(1U, shorty.length());
+size_t ArtMethod::NumArgRegisters(const char* shorty) {
+  CHECK_NE(shorty[0], '\0');
   uint32_t num_registers = 0;
-  for (size_t i = 1; i < shorty.length(); ++i) {
-    char ch = shorty[i];
-    if (ch == 'D' || ch == 'J') {
+  for (const char* s = shorty + 1; *s != '\0'; ++s) {
+    if (*s == 'D' || *s == 'J') {
       num_registers += 2;
     } else {
       num_registers += 1;
@@ -180,14 +181,14 @@
 bool ArtMethod::HasSameNameAndSignature(ArtMethod* other) {
   ScopedAssertNoThreadSuspension ants("HasSameNameAndSignature");
   const DexFile* dex_file = GetDexFile();
-  const DexFile::MethodId& mid = dex_file->GetMethodId(GetDexMethodIndex());
+  const dex::MethodId& mid = dex_file->GetMethodId(GetDexMethodIndex());
   if (GetDexCache() == other->GetDexCache()) {
-    const DexFile::MethodId& mid2 = dex_file->GetMethodId(other->GetDexMethodIndex());
+    const dex::MethodId& mid2 = dex_file->GetMethodId(other->GetDexMethodIndex());
     return mid.name_idx_ == mid2.name_idx_ && mid.proto_idx_ == mid2.proto_idx_;
   }
   const DexFile* dex_file2 = other->GetDexFile();
-  const DexFile::MethodId& mid2 = dex_file2->GetMethodId(other->GetDexMethodIndex());
-  if (!DexFileStringEquals(dex_file, mid.name_idx_, dex_file2, mid2.name_idx_)) {
+  const dex::MethodId& mid2 = dex_file2->GetMethodId(other->GetDexMethodIndex());
+  if (!DexFile::StringEquals(dex_file, mid.name_idx_, dex_file2, mid2.name_idx_)) {
     return false;  // Name mismatch.
   }
   return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2);
@@ -233,17 +234,17 @@
                                                      uint32_t name_and_signature_idx) {
   const DexFile* dexfile = GetDexFile();
   const uint32_t dex_method_idx = GetDexMethodIndex();
-  const DexFile::MethodId& mid = dexfile->GetMethodId(dex_method_idx);
-  const DexFile::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx);
+  const dex::MethodId& mid = dexfile->GetMethodId(dex_method_idx);
+  const dex::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx);
   DCHECK_STREQ(dexfile->GetMethodName(mid), other_dexfile.GetMethodName(name_and_sig_mid));
   DCHECK_EQ(dexfile->GetMethodSignature(mid), other_dexfile.GetMethodSignature(name_and_sig_mid));
   if (dexfile == &other_dexfile) {
     return dex_method_idx;
   }
   const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_);
-  const DexFile::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
+  const dex::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
   if (other_type_id != nullptr) {
-    const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
+    const dex::MethodId* other_mid = other_dexfile.FindMethodId(
         *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
         other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
     if (other_mid != nullptr) {
@@ -324,12 +325,12 @@
   if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
     if (IsStatic()) {
       art::interpreter::EnterInterpreterFromInvoke(
-          self, this, nullptr, args, result, /*stay_in_interpreter*/ true);
+          self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
     } else {
       mirror::Object* receiver =
           reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
       art::interpreter::EnterInterpreterFromInvoke(
-          self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
+          self, this, receiver, args + 1, result, /*stay_in_interpreter=*/ true);
     }
   } else {
     DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -445,11 +446,11 @@
 
   // recreate the class_def_index from the descriptor.
   std::string descriptor_storage;
-  const DexFile::TypeId* declaring_class_type_id =
+  const dex::TypeId* declaring_class_type_id =
       dex_file->FindTypeId(method->GetDeclaringClass()->GetDescriptor(&descriptor_storage));
   CHECK(declaring_class_type_id != nullptr);
   dex::TypeIndex declaring_class_type_index = dex_file->GetIndexForTypeId(*declaring_class_type_id);
-  const DexFile::ClassDef* declaring_class_type_def =
+  const dex::ClassDef* declaring_class_type_def =
       dex_file->FindClassDef(declaring_class_type_index);
   CHECK(declaring_class_type_def != nullptr);
   uint16_t declaring_class_def_index = dex_file->GetIndexForClassDef(*declaring_class_type_def);
@@ -503,10 +504,10 @@
                          << method->PrettyMethod();
   }
   DCHECK_EQ(oat_method_index,
-            GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
+            GetOatMethodIndexFromMethodIndex(declaring_class->GetDexFile(),
                                              method->GetDeclaringClass()->GetDexClassDefIndex(),
                                              method->GetDexMethodIndex()));
-  OatFile::OatClass oat_class = OatFile::FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+  OatFile::OatClass oat_class = OatFile::FindOatClass(declaring_class->GetDexFile(),
                                                       declaring_class->GetDexClassDefIndex(),
                                                       found);
   if (!(*found)) {
@@ -520,7 +521,7 @@
   auto* dex_file = dex_cache->GetDexFile();
   const auto& method_id = dex_file->GetMethodId(GetDexMethodIndex());
   const auto& proto_id = dex_file->GetMethodPrototype(method_id);
-  const DexFile::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
+  const dex::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
   auto count = proto_params != nullptr ? proto_params->Size() : 0u;
   auto param_len = params != nullptr ? params->GetLength() : 0u;
   if (param_len != count) {
@@ -542,13 +543,12 @@
 }
 
 ArrayRef<const uint8_t> ArtMethod::GetQuickenedInfo() {
-  const DexFile& dex_file = GetDeclaringClass()->GetDexFile();
+  const DexFile& dex_file = *GetDexFile();
   const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
-  if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
+  if (oat_dex_file == nullptr) {
     return ArrayRef<const uint8_t>();
   }
-  return oat_dex_file->GetOatFile()->GetVdexFile()->GetQuickenedInfoOf(dex_file,
-                                                                       GetDexMethodIndex());
+  return oat_dex_file->GetQuickenedInfoOf(dex_file, GetDexMethodIndex());
 }
 
 uint16_t ArtMethod::GetIndexFromQuickening(uint32_t dex_pc) {
@@ -682,23 +682,72 @@
   return GetOatMethodQuickCode(runtime->GetClassLinker()->GetImagePointerSize()) != nullptr;
 }
 
+void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
+  // Currently we only do intrinsics for static/final methods or methods of final
+  // classes. We don't set kHasSingleImplementation for those methods.
+  DCHECK(IsStatic() || IsFinal() || GetDeclaringClass()->IsFinal()) <<
+      "Potential conflict with kAccSingleImplementation";
+  static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
+  DCHECK_LE(intrinsic, kAccIntrinsicBits >> kAccFlagsShift);
+  uint32_t intrinsic_bits = intrinsic << kAccFlagsShift;
+  uint32_t new_value = (GetAccessFlags() & ~kAccIntrinsicBits) | kAccIntrinsic | intrinsic_bits;
+  if (kIsDebugBuild) {
+    uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
+    bool is_constructor = IsConstructor();
+    bool is_synchronized = IsSynchronized();
+    bool skip_access_checks = SkipAccessChecks();
+    bool is_fast_native = IsFastNative();
+    bool is_critical_native = IsCriticalNative();
+    bool is_copied = IsCopied();
+    bool is_miranda = IsMiranda();
+    bool is_default = IsDefault();
+    bool is_default_conflict = IsDefaultConflicting();
+    bool is_compilable = IsCompilable();
+    bool must_count_locks = MustCountLocks();
+    uint32_t hiddenapi_flags = hiddenapi::GetRuntimeFlags(this);
+    SetAccessFlags(new_value);
+    DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
+    DCHECK_EQ(is_constructor, IsConstructor());
+    DCHECK_EQ(is_synchronized, IsSynchronized());
+    DCHECK_EQ(skip_access_checks, SkipAccessChecks());
+    DCHECK_EQ(is_fast_native, IsFastNative());
+    DCHECK_EQ(is_critical_native, IsCriticalNative());
+    DCHECK_EQ(is_copied, IsCopied());
+    DCHECK_EQ(is_miranda, IsMiranda());
+    DCHECK_EQ(is_default, IsDefault());
+    DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
+    DCHECK_EQ(is_compilable, IsCompilable());
+    DCHECK_EQ(must_count_locks, MustCountLocks());
+    // Only DCHECK that we have preserved the hidden API access flags if the
+    // original method was not on the whitelist. This is because the core image
+    // does not have the access flags set (b/77733081). It is fine to hard-code
+    // these because (a) warnings on greylist do not change semantics, and
+    // (b) only VarHandle intrinsics are blacklisted at the moment and they
+    // should not be used outside tests with disabled API checks.
+    if ((hiddenapi_flags & kAccHiddenapiBits) != kAccPublicApi) {
+      DCHECK_EQ(hiddenapi_flags, hiddenapi::GetRuntimeFlags(this)) << PrettyMethod();
+    }
+  } else {
+    SetAccessFlags(new_value);
+  }
+}
+
 void ArtMethod::SetNotIntrinsic() {
   if (!IsIntrinsic()) {
     return;
   }
 
-  // Query the hidden API access flags of the intrinsic.
-  HiddenApiAccessFlags::ApiList intrinsic_api_list = GetHiddenApiAccessFlags();
+  // Read the existing hiddenapi flags.
+  uint32_t hiddenapi_runtime_flags = hiddenapi::GetRuntimeFlags(this);
 
   // Clear intrinsic-related access flags.
   ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
 
   // Re-apply hidden API access flags now that the method is not an intrinsic.
-  SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(GetAccessFlags(), intrinsic_api_list));
-  DCHECK_EQ(GetHiddenApiAccessFlags(), intrinsic_api_list);
+  SetAccessFlags(GetAccessFlags() | hiddenapi_runtime_flags);
+  DCHECK_EQ(hiddenapi_runtime_flags, hiddenapi::GetRuntimeFlags(this));
 }
 
-
 void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
   memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
          Size(image_pointer_size));
@@ -763,7 +812,12 @@
   }
   ArtMethod* m =
       GetInterfaceMethodIfProxy(Runtime::Current()->GetClassLinker()->GetImagePointerSize());
-  return m->GetDexFile()->PrettyMethod(m->GetDexMethodIndex(), with_signature);
+  std::string res(m->GetDexFile()->PrettyMethod(m->GetDexMethodIndex(), with_signature));
+  if (with_signature && m->IsObsolete()) {
+    return "<OBSOLETE> " + res;
+  } else {
+    return res;
+  }
 }
 
 std::string ArtMethod::JniShortName() {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 48ddc69..feff91a 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -20,19 +20,20 @@
 #include <cstddef>
 
 #include <android-base/logging.h>
+#include <jni.h>
 
 #include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/casts.h"
 #include "base/enums.h"
-#include "base/iteration_range.h"
 #include "base/macros.h"
 #include "base/runtime_debug.h"
 #include "dex/code_item_accessors.h"
-#include "dex/dex_file.h"
+#include "dex/dex_file_structs.h"
 #include "dex/dex_instruction_iterator.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
+#include "dex/signature.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 #include "offsets.h"
@@ -40,6 +41,7 @@
 
 namespace art {
 
+class DexFile;
 template<class T> class Handle;
 class ImtConflictTable;
 enum InvokeType : uint32_t;
@@ -47,7 +49,6 @@
 class OatQuickMethodHeader;
 class ProfilingInfo;
 class ScopedObjectAccessAlreadyRunnable;
-class StringPiece;
 class ShadowFrame;
 
 namespace mirror {
@@ -103,7 +104,7 @@
   bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DeclaringClassOffset() {
+  static constexpr MemberOffset DeclaringClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
@@ -118,7 +119,7 @@
     access_flags_.store(new_access_flags, std::memory_order_relaxed);
   }
 
-  static MemberOffset AccessFlagsOffset() {
+  static constexpr MemberOffset AccessFlagsOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
   }
 
@@ -285,6 +286,23 @@
 
   bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  bool UseFastInterpreterToInterpreterInvoke() {
+    // The bit is applicable only if the method is not intrinsic.
+    constexpr uint32_t mask = kAccFastInterpreterToInterpreterInvoke | kAccIntrinsic;
+    return (GetAccessFlags() & mask) == kAccFastInterpreterToInterpreterInvoke;
+  }
+
+  void SetFastInterpreterToInterpreterInvokeFlag() {
+    DCHECK(!IsIntrinsic());
+    AddAccessFlags(kAccFastInterpreterToInterpreterInvoke);
+  }
+
+  void ClearFastInterpreterToInterpreterInvokeFlag() {
+    if (!IsIntrinsic()) {
+      ClearAccessFlags(kAccFastInterpreterToInterpreterInvoke);
+    }
+  }
+
   bool SkipAccessChecks() {
     // The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
     // so we need to check the kAccNative flag as well.
@@ -326,8 +344,6 @@
     AddAccessFlags(kAccMustCountLocks);
   }
 
-  HiddenApiAccessFlags::ApiList GetHiddenApiAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Returns true if this method could be overridden by a default method.
   bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -351,11 +367,11 @@
     method_index_ = new_method_index;
   }
 
-  static MemberOffset DexMethodIndexOffset() {
+  static constexpr MemberOffset DexMethodIndexOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
   }
 
-  static MemberOffset MethodIndexOffset() {
+  static constexpr MemberOffset MethodIndexOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
   }
 
@@ -369,7 +385,7 @@
   }
 
   // Number of 32bit registers that would be required to hold all the arguments
-  static size_t NumArgRegisters(const StringPiece& shorty);
+  static size_t NumArgRegisters(const char* shorty);
 
   ALWAYS_INLINE uint32_t GetDexMethodIndex() {
     return dex_method_index_;
@@ -422,6 +438,8 @@
     SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
                      entry_point_from_quick_compiled_code,
                      pointer_size);
+    // We might want to invoke compiled code, so don't use the fast path.
+    ClearFastInterpreterToInterpreterInvokeFlag();
   }
 
   // Registers the native method and returns the new entry point. NB The returned entry point might
@@ -431,16 +449,16 @@
 
   void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DataOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
   }
 
-  static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
     return DataOffset(pointer_size);
   }
 
-  static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
             * static_cast<size_t>(pointer_size));
@@ -457,7 +475,7 @@
   }
 
   ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (UNLIKELY(IsNative()) || UNLIKELY(IsProxyMethod())) {
+    if (UNLIKELY(IsNative() || IsProxyMethod() || !IsInvokable())) {
       return nullptr;
     }
     return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
@@ -569,21 +587,21 @@
 
   ObjPtr<mirror::String> ResolveNameString() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsResolvedTypeIdx(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
   int32_t GetLineNumFromDexPC(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
 
   const char* GetDeclaringClassSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
 
   uint16_t GetClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE size_t GetNumberOfParameters() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -633,26 +651,15 @@
   void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise.  Also,
-  // given that the counter is only 16 bits wide we can expect wrap-around in some
-  // situations.  Consumers of hotness_count_ must be able to deal with that.
-  uint16_t IncrementCounter() {
-    return ++hotness_count_;
-  }
+  ALWAYS_INLINE void SetCounter(int16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void ClearCounter() {
-    hotness_count_ = 0;
-  }
+  ALWAYS_INLINE uint16_t GetCounter() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetCounter(int16_t hotness_count) {
-    hotness_count_ = hotness_count;
-  }
+  ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint16_t GetCounter() const {
-    return hotness_count_;
-  }
+  void CalculateAndSetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset HotnessCountOffset() {
+  static constexpr MemberOffset HotnessCountOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
   }
 
@@ -725,6 +732,10 @@
   ALWAYS_INLINE CodeItemDebugInfoAccessor DexInstructionDebugInfo()
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  GcRoot<mirror::Class>& DeclaringClassRoot() {
+    return declaring_class_;
+  }
+
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of.
@@ -751,9 +762,14 @@
   // ifTable.
   uint16_t method_index_;
 
-  // The hotness we measure for this method. Not atomic, as we allow
-  // missing increments: if the method is hot, we will see it eventually.
-  uint16_t hotness_count_;
+  union {
+    // Non-abstract methods: The hotness we measure for this method. Not atomic,
+    // as we allow missing increments: if the method is hot, we will see it eventually.
+    uint16_t hotness_count_;
+    // Abstract methods: IMT index (bitwise negated) or zero if it was not cached.
+    // The negation is needed to distinguish zero index and missing cached entry.
+    uint16_t imt_index_;
+  };
 
   // Fake padding field gets inserted here.
 
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e65c194..eac9856 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -20,207 +20,7 @@
 #include "heap_poisoning.h"
 #include "read_barrier_config.h"
 
-// To generate tests related to the constants in this header, either define ADD_TEST_EQ before
-// including, or use asm_support_check.h.
-#ifndef ADD_TEST_EQ  // Allow #include-r to replace with their own.
-#define DEFINED_ADD_TEST_EQ 1
-#define ADD_TEST_EQ(x, y)
-#endif
-
-#if defined(__LP64__)
-#define POINTER_SIZE_SHIFT 3
-#define POINTER_SIZE art::PointerSize::k64
-#else
-#define POINTER_SIZE_SHIFT 2
-#define POINTER_SIZE art::PointerSize::k32
-#endif
-ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
-            static_cast<size_t>(__SIZEOF_POINTER__))
-
-// Import platform-independent constant defines from our autogenerated list.
-// Export new defines (for assembly use) by editing cpp-define-generator def files.
-#define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "asm_support_gen.h"
-#undef DEFINE_CHECK_EQ
-
-// Offset of field Thread::tlsPtr_.exception.
-#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET,
-            art::Thread::ExceptionOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
-#define THREAD_TOP_QUICK_FRAME_OFFSET (THREAD_CARD_TABLE_OFFSET + (3 * __SIZEOF_POINTER__))
-ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
-            art::Thread::TopOfManagedStackOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.self.
-#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__))
-ADD_TEST_EQ(THREAD_SELF_OFFSET,
-            art::Thread::SelfOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 34 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
-            art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_end.
-#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
-            art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
-            art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET \
-    (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 166) * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
-            art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.mterp_default_ibase.
-#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_CURRENT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
-            art::Thread::MterpDefaultIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
-#define THREAD_ALT_IBASE_OFFSET (THREAD_DEFAULT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
-            art::Thread::MterpAltIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_ALT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
-            art::Thread::RosAllocRunsOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
-#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 16 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
-            art::Thread::ThreadLocalAllocStackTopOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
-#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
-            art::Thread::ThreadLocalAllocStackEndOffset<POINTER_SIZE>().Int32Value())
-
-// Offsets within ShadowFrame.
-#define SHADOWFRAME_LINK_OFFSET 0
-ADD_TEST_EQ(SHADOWFRAME_LINK_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::LinkOffset()))
-#define SHADOWFRAME_METHOD_OFFSET (SHADOWFRAME_LINK_OFFSET + 1 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_METHOD_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::MethodOffset()))
-#define SHADOWFRAME_RESULT_REGISTER_OFFSET (SHADOWFRAME_LINK_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_RESULT_REGISTER_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::ResultRegisterOffset()))
-#define SHADOWFRAME_DEX_PC_PTR_OFFSET (SHADOWFRAME_LINK_OFFSET + 3 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_DEX_PC_PTR_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::DexPCPtrOffset()))
-#define SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::DexInstructionsOffset()))
-#define SHADOWFRAME_LOCK_COUNT_DATA_OFFSET (SHADOWFRAME_LINK_OFFSET + 5 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::LockCountDataOffset()))
-#define SHADOWFRAME_NUMBER_OF_VREGS_OFFSET (SHADOWFRAME_LINK_OFFSET + 6 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::NumberOfVRegsOffset()))
-#define SHADOWFRAME_DEX_PC_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 4)
-ADD_TEST_EQ(SHADOWFRAME_DEX_PC_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::DexPCOffset()))
-#define SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 8)
-ADD_TEST_EQ(SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::CachedHotnessCountdownOffset()))
-#define SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 10)
-ADD_TEST_EQ(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::HotnessCountdownOffset()))
-#define SHADOWFRAME_VREGS_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 16)
-ADD_TEST_EQ(SHADOWFRAME_VREGS_OFFSET,
-            static_cast<int32_t>(art::ShadowFrame::VRegsOffset()))
-
-#if defined(USE_BROOKS_READ_BARRIER)
-#define MIRROR_OBJECT_HEADER_SIZE 16
-#else
-#define MIRROR_OBJECT_HEADER_SIZE 8
-#endif
-ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object))
-
-// Offsets within java.lang.Class.
-#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
-            art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_IF_TABLE_OFFSET (16 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_IF_TABLE_OFFSET,
-            art::mirror::Class::IfTableOffset().Int32Value())
-#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (56 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
-            art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (88 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
-            art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET (92 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
-            art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
-            art::mirror::Class::PrimitiveTypeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
-            art::mirror::Class::StatusOffset().Int32Value())
-
-#define PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT 16
-ADD_TEST_EQ(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
-            static_cast<int>(art::mirror::Class::kPrimitiveTypeSizeShiftShift))
-
-// Array offsets.
-#define MIRROR_ARRAY_LENGTH_OFFSET      MIRROR_OBJECT_HEADER_SIZE
-ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
-
-#define MIRROR_CHAR_ARRAY_DATA_OFFSET   (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CHAR_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
-
-#define MIRROR_BOOLEAN_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
-
-#define MIRROR_BYTE_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_BYTE_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
-
-#define MIRROR_SHORT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_SHORT_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
-
-#define MIRROR_INT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_INT_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
-
-#define MIRROR_WIDE_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_WIDE_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
-
-#define MIRROR_OBJECT_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
-    art::mirror::Array::DataOffset(
-        sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
-
-#define MIRROR_OBJECT_ARRAY_COMPONENT_SIZE 4
-ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE),
-            sizeof(art::mirror::HeapReference<art::mirror::Object>))
-
-#define MIRROR_LONG_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_LONG_ARRAY_DATA_OFFSET,
-            art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
-
-// Offsets within java.lang.String.
-#define MIRROR_STRING_COUNT_OFFSET  MIRROR_OBJECT_HEADER_SIZE
-ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value())
-
-#define MIRROR_STRING_VALUE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-
-// String compression feature.
-#define STRING_COMPRESSION_FEATURE 1
-ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
-
-#ifdef DEFINED_ADD_TEST_EQ
-#undef ADD_TEST_EQ
-#undef DEFINED_ADD_TEST_EQ
-#endif
+// Automatically generated header based on the asm_defines.def file.
+#include "asm_defines.h"
 
 #endif  // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/asm_support_check.h b/runtime/asm_support_check.h
deleted file mode 100644
index 3163506..0000000
--- a/runtime/asm_support_check.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ASM_SUPPORT_CHECK_H_
-#define ART_RUNTIME_ASM_SUPPORT_CHECK_H_
-
-#include "art_method.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "gc/accounting/card_table.h"
-#include "gc/allocator/rosalloc.h"
-#include "gc/heap.h"
-#include "jit/jit.h"
-#include "lock_word.h"
-#include "mirror/class.h"
-#include "mirror/dex_cache.h"
-#include "mirror/string.h"
-#include "runtime.h"
-#include "stack.h"
-#include "thread.h"
-#include "utils/dex_cache_arrays_layout.h"
-
-#ifndef ADD_TEST_EQ
-#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
-#endif
-
-#ifndef ASM_SUPPORT_CHECK_RETURN_TYPE
-#define ASM_SUPPORT_CHECK_RETURN_TYPE void
-#endif
-
-// Prepare for re-include of asm_support.h.
-#ifdef ART_RUNTIME_ASM_SUPPORT_H_
-#undef ART_RUNTIME_ASM_SUPPORT_H_
-#endif
-
-namespace art {
-
-static inline ASM_SUPPORT_CHECK_RETURN_TYPE CheckAsmSupportOffsetsAndSizes() {
-#ifdef ASM_SUPPORT_CHECK_HEADER
-  ASM_SUPPORT_CHECK_HEADER
-#endif
-
-#include "asm_support.h"
-
-#ifdef ASM_SUPPORT_CHECK_FOOTER
-  ASM_SUPPORT_CHECK_FOOTER
-#endif
-}
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ASM_SUPPORT_CHECK_H_
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 8d3cf45..a1a3659 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -27,15 +27,15 @@
 
 Barrier::Barrier(int count)
     : count_(count),
-      lock_("GC barrier lock", kThreadSuspendCountLock),
-      condition_("GC barrier condition", lock_) {
+      lock_(new Mutex("GC barrier lock", kThreadSuspendCountLock)),
+      condition_(new ConditionVariable("GC barrier condition", *lock_)) {
 }
 
 template void Barrier::Increment<Barrier::kAllowHoldingLocks>(Thread* self, int delta);
 template void Barrier::Increment<Barrier::kDisallowHoldingLocks>(Thread* self, int delta);
 
 void Barrier::Pass(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ - 1);
 }
 
@@ -44,13 +44,13 @@
 }
 
 void Barrier::Init(Thread* self, int count) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count);
 }
 
 template <Barrier::LockHandling locks>
 void Barrier::Increment(Thread* self, int delta) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ + delta);
 
   // Increment the count.  If it becomes zero after the increment
@@ -62,22 +62,22 @@
   // condition variable, thus waking this up.
   while (count_ != 0) {
     if (locks == kAllowHoldingLocks) {
-      condition_.WaitHoldingLocks(self);
+      condition_->WaitHoldingLocks(self);
     } else {
-      condition_.Wait(self);
+      condition_->Wait(self);
     }
   }
 }
 
 bool Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ + delta);
   bool timed_out = false;
   if (count_ != 0) {
     uint32_t timeout_ns = 0;
     uint64_t abs_timeout = NanoTime() + MsToNs(timeout_ms);
     for (;;) {
-      timed_out = condition_.TimedWait(self, timeout_ms, timeout_ns);
+      timed_out = condition_->TimedWait(self, timeout_ms, timeout_ns);
       if (timed_out || count_ == 0) return timed_out;
       // Compute time remaining on timeout.
       uint64_t now = NanoTime();
@@ -91,14 +91,14 @@
 }
 
 int Barrier::GetCount(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   return count_;
 }
 
 void Barrier::SetCountLocked(Thread* self, int count) {
   count_ = count;
   if (count == 0) {
-    condition_.Broadcast(self);
+    condition_->Broadcast(self);
   }
 }
 
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 8a38c4c..e21627e 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -28,10 +28,14 @@
 #define ART_RUNTIME_BARRIER_H_
 
 #include <memory>
-#include "base/mutex.h"
+
+#include "base/locks.h"
 
 namespace art {
 
+class ConditionVariable;
+class LOCKABLE Mutex;
+
 // TODO: Maybe give this a better name.
 class Barrier {
  public:
@@ -44,10 +48,10 @@
   virtual ~Barrier();
 
   // Pass through the barrier, decrement the count but do not block.
-  void Pass(Thread* self) REQUIRES(!lock_);
+  void Pass(Thread* self) REQUIRES(!GetLock());
 
   // Wait on the barrier, decrement the count.
-  void Wait(Thread* self) REQUIRES(!lock_);
+  void Wait(Thread* self) REQUIRES(!GetLock());
 
   // The following three calls are only safe if we somehow know that no other thread both
   // - has been woken up, and
@@ -58,26 +62,30 @@
   // Increment the count by delta, wait on condition if count is non zero.  If LockHandling is
   // kAllowHoldingLocks we will not check that all locks are released when waiting.
   template <Barrier::LockHandling locks = kDisallowHoldingLocks>
-  void Increment(Thread* self, int delta) REQUIRES(!lock_);
+  void Increment(Thread* self, int delta) REQUIRES(!GetLock());
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
   // true if time out occurred.
-  bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_);
+  bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!GetLock());
 
   // Set the count to a new value.  This should only be used if there is no possibility that
   // another thread is still in Wait().  See above.
-  void Init(Thread* self, int count) REQUIRES(!lock_);
+  void Init(Thread* self, int count) REQUIRES(!GetLock());
 
-  int GetCount(Thread* self) REQUIRES(!lock_);
+  int GetCount(Thread* self) REQUIRES(!GetLock());
 
  private:
-  void SetCountLocked(Thread* self, int count) REQUIRES(lock_);
+  void SetCountLocked(Thread* self, int count) REQUIRES(GetLock());
+
+  Mutex* GetLock() {
+    return lock_.get();
+  }
 
   // Counter, when this reaches 0 all people blocked on the barrier are signalled.
-  int count_ GUARDED_BY(lock_);
+  int count_ GUARDED_BY(GetLock());
 
-  Mutex lock_ ACQUIRED_AFTER(Locks::abort_lock_);
-  ConditionVariable condition_ GUARDED_BY(lock_);
+  std::unique_ptr<Mutex> lock_ ACQUIRED_AFTER(Locks::abort_lock_);
+  std::unique_ptr<ConditionVariable> condition_ GUARDED_BY(GetLock());
 };
 
 }  // namespace art
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 88075ba..5ec24bc 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -32,7 +32,7 @@
         count1_(count1),
         count2_(count2) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     LOG(INFO) << "Before barrier" << *self;
     ++*count1_;
     barrier_->Wait(self);
@@ -40,7 +40,7 @@
     LOG(INFO) << "After barrier" << *self;
   }
 
-  virtual void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -91,7 +91,7 @@
         count_(count),
         subtasks_(subtasks) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     for (size_t i = 0; i < subtasks_; ++i) {
       ++*count_;
       // Pass through to next subtask.
@@ -99,7 +99,7 @@
     }
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
  private:
diff --git a/runtime/base/callee_save_type.h b/runtime/base/callee_save_type.h
new file mode 100644
index 0000000..e7cc7e6
--- /dev/null
+++ b/runtime/base/callee_save_type.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
+#define ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
+
+#include <cstdint>
+#include <iosfwd>
+
+namespace art {
+
+// The types of callee-save frames: each describes which registers are spilled to the stack.
+enum class CalleeSaveType : uint32_t {
+  kSaveAllCalleeSaves,  // All callee-save registers.
+  kSaveRefsOnly,        // Only those callee-save registers that can hold references.
+  kSaveRefsAndArgs,     // References (see above) and arguments (usually caller-save registers).
+  kSaveEverything,      // All registers, including both callee-save and caller-save.
+  kSaveEverythingForClinit,    // Special kSaveEverything for clinit.
+  kSaveEverythingForSuspendCheck,  // Special kSaveEverything for suspend check.
+  kLastCalleeSaveType   // Value used for iteration.
+};
+std::ostream& operator<<(std::ostream& os, const CalleeSaveType& rhs);
+
+static inline constexpr CalleeSaveType GetCanonicalCalleeSaveType(CalleeSaveType type) {
+  if (type == CalleeSaveType::kSaveEverythingForClinit ||
+      type == CalleeSaveType::kSaveEverythingForSuspendCheck) {
+    return CalleeSaveType::kSaveEverything;
+  }
+  return type;
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
new file mode 100644
index 0000000..a7922a2
--- /dev/null
+++ b/runtime/base/locks.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "locks.h"
+
+#include <errno.h>
+#include <sys/time.h>
+
+#include "android-base/logging.h"
+
+#include "base/atomic.h"
+#include "base/logging.h"
+#include "base/systrace.h"
+#include "base/time_utils.h"
+#include "base/value_object.h"
+#include "mutex-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
+
+Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::alloc_tracker_lock_ = nullptr;
+Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
+ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
+ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::custom_tls_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
+ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
+Mutex* Locks::jni_function_table_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
+Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
+MutatorMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
+ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
+Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
+Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
+Mutex* Locks::cha_lock_ = nullptr;
+Mutex* Locks::subtype_check_lock_ = nullptr;
+Mutex* Locks::thread_list_lock_ = nullptr;
+ConditionVariable* Locks::thread_exit_cond_ = nullptr;
+Mutex* Locks::thread_suspend_count_lock_ = nullptr;
+Mutex* Locks::trace_lock_ = nullptr;
+Mutex* Locks::unexpected_signal_lock_ = nullptr;
+Mutex* Locks::user_code_suspension_lock_ = nullptr;
+Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
+ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
+std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
+Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
+
+// Wait for an amount of time that roughly increases in the argument i.
+// Spin for small arguments and yield/sleep for longer ones.
+static void BackOff(uint32_t i) {
+  static constexpr uint32_t kSpinMax = 10;
+  static constexpr uint32_t kYieldMax = 20;
+  if (i <= kSpinMax) {
+    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
+    // test-and-test-and-set loop in the caller.  Possibly skip entirely on a uniprocessor.
+    volatile uint32_t x = 0;
+    const uint32_t spin_count = 10 * i;
+    for (uint32_t spin = 0; spin < spin_count; ++spin) {
+      ++x;  // Volatile; hence should not be optimized away.
+    }
+    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
+  } else if (i <= kYieldMax) {
+    sched_yield();
+  } else {
+    NanoSleep(1000ull * (i - kYieldMax));
+  }
+}
+
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
+ public:
+  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
+    for (uint32_t i = 0;
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
+                                                                                     mutex);
+         ++i) {
+      BackOff(i);
+    }
+  }
+
+  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
+    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
+              mutex_);
+    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
+  }
+
+ private:
+  const BaseMutex* const mutex_;
+};
+
+void Locks::Init() {
+  if (logging_lock_ != nullptr) {
+    // Already initialized.
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      DCHECK(modify_ldt_lock_ != nullptr);
+    } else {
+      DCHECK(modify_ldt_lock_ == nullptr);
+    }
+    DCHECK(abort_lock_ != nullptr);
+    DCHECK(alloc_tracker_lock_ != nullptr);
+    DCHECK(allocated_monitor_ids_lock_ != nullptr);
+    DCHECK(allocated_thread_ids_lock_ != nullptr);
+    DCHECK(breakpoint_lock_ != nullptr);
+    DCHECK(classlinker_classes_lock_ != nullptr);
+    DCHECK(custom_tls_lock_ != nullptr);
+    DCHECK(deoptimization_lock_ != nullptr);
+    DCHECK(heap_bitmap_lock_ != nullptr);
+    DCHECK(oat_file_manager_lock_ != nullptr);
+    DCHECK(verifier_deps_lock_ != nullptr);
+    DCHECK(host_dlopen_handles_lock_ != nullptr);
+    DCHECK(intern_table_lock_ != nullptr);
+    DCHECK(jni_function_table_lock_ != nullptr);
+    DCHECK(jni_libraries_lock_ != nullptr);
+    DCHECK(logging_lock_ != nullptr);
+    DCHECK(mutator_lock_ != nullptr);
+    DCHECK(profiler_lock_ != nullptr);
+    DCHECK(cha_lock_ != nullptr);
+    DCHECK(subtype_check_lock_ != nullptr);
+    DCHECK(thread_list_lock_ != nullptr);
+    DCHECK(thread_suspend_count_lock_ != nullptr);
+    DCHECK(trace_lock_ != nullptr);
+    DCHECK(unexpected_signal_lock_ != nullptr);
+    DCHECK(user_code_suspension_lock_ != nullptr);
+    DCHECK(dex_lock_ != nullptr);
+    DCHECK(native_debug_interface_lock_ != nullptr);
+    DCHECK(runtime_thread_pool_lock_ != nullptr);
+  } else {
+    // Create global locks in level order from highest lock level to lowest.
+    LockLevel current_lock_level = kInstrumentEntrypointsLock;
+    DCHECK(instrument_entrypoints_lock_ == nullptr);
+    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
+
+    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+      if ((new_level) >= current_lock_level) { \
+        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
+        fprintf(stderr, "New local level %d is not less than current level %d\n", \
+                new_level, current_lock_level); \
+        exit(1); \
+      } \
+      current_lock_level = new_level;
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
+    DCHECK(user_code_suspension_lock_ == nullptr);
+    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+    DCHECK(mutator_lock_ == nullptr);
+    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
+    DCHECK(heap_bitmap_lock_ == nullptr);
+    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
+    DCHECK(runtime_shutdown_lock_ == nullptr);
+    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
+    DCHECK(runtime_thread_pool_lock_ == nullptr);
+    runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+    DCHECK(profiler_lock_ == nullptr);
+    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
+    DCHECK(deoptimization_lock_ == nullptr);
+    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
+    DCHECK(alloc_tracker_lock_ == nullptr);
+    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
+    DCHECK(thread_list_lock_ == nullptr);
+    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+    DCHECK(jni_libraries_lock_ == nullptr);
+    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
+    DCHECK(breakpoint_lock_ == nullptr);
+    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
+    DCHECK(subtype_check_lock_ == nullptr);
+    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
+    DCHECK(classlinker_classes_lock_ == nullptr);
+    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+                                                      current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
+    DCHECK(allocated_monitor_ids_lock_ == nullptr);
+    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+    DCHECK(allocated_thread_ids_lock_ == nullptr);
+    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
+
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+      DCHECK(modify_ldt_lock_ == nullptr);
+      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+    }
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
+    DCHECK(dex_lock_ == nullptr);
+    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
+    DCHECK(oat_file_manager_lock_ == nullptr);
+    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
+    DCHECK(verifier_deps_lock_ == nullptr);
+    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
+    DCHECK(host_dlopen_handles_lock_ == nullptr);
+    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
+    DCHECK(intern_table_lock_ == nullptr);
+    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+    DCHECK(reference_processor_lock_ == nullptr);
+    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+    DCHECK(reference_queue_weak_references_lock_ == nullptr);
+    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+    DCHECK(reference_queue_soft_references_lock_ == nullptr);
+    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+    DCHECK(jni_globals_lock_ == nullptr);
+    jni_globals_lock_ =
+        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+    DCHECK(jni_weak_globals_lock_ == nullptr);
+    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
+    DCHECK(jni_function_table_lock_ == nullptr);
+    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
+    DCHECK(custom_tls_lock_ == nullptr);
+    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+    DCHECK(cha_lock_ == nullptr);
+    cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+    DCHECK(native_debug_interface_lock_ == nullptr);
+    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+    DCHECK(abort_lock_ == nullptr);
+    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
+    DCHECK(thread_suspend_count_lock_ == nullptr);
+    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
+    DCHECK(unexpected_signal_lock_ == nullptr);
+    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+    DCHECK(logging_lock_ == nullptr);
+    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+    #undef UPDATE_CURRENT_LOCK_LEVEL
+
+    // List of mutexes that we may hold when accessing a weak ref.
+    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
+
+    InitConditions();
+  }
+}
+
+void Locks::InitConditions() {
+  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
+}
+
+void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
+  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
+}
+
+// Helper to allow checking shutdown while ignoring locking requirements.
+bool Locks::IsSafeToCallAbortRacy() {
+  Locks::ClientCallback* safe_to_call_abort_cb =
+      safe_to_call_abort_callback.load(std::memory_order_acquire);
+  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
+}
+
+void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  }
+}
+
+void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  }
+}
+
+bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
+  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+  return std::find(list.begin(), list.end(), mutex) != list.end();
+}
+
+}  // namespace art
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
new file mode 100644
index 0000000..b7d8e31
--- /dev/null
+++ b/runtime/base/locks.h
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_LOCKS_H_
+#define ART_RUNTIME_BASE_LOCKS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <vector>
+
+#include "base/atomic.h"
+#include "base/macros.h"
+
+namespace art {
+
+class BaseMutex;
+class ConditionVariable;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
+class LOCKABLE Mutex;
+class Thread;
+
+// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
+// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
+// partial ordering and thereby causes deadlock situations to fail checks.
+//
+// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
+enum LockLevel : uint8_t {
+  kLoggingLock = 0,
+  kSwapMutexesLock,
+  kUnexpectedSignalLock,
+  kThreadSuspendCountLock,
+  kAbortLock,
+  kNativeDebugInterfaceLock,
+  kSignalHandlingLock,
+  // A generic lock level for mutexes that should not allow any additional mutexes to be gained after
+  // acquiring it.
+  kGenericBottomLock,
+  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
+  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
+  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
+  // the second lock acquisition does not result in deadlock. This is implemented in the lock
+  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
+  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
+  // is here near the bottom of the hierarchy because other locks should not be
+  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
+  // activity acquires locks while holding the wait lock.
+  kThreadWaitWakeLock,
+  kJdwpAdbStateLock,
+  kJdwpSocketLock,
+  kRegionSpaceRegionLock,
+  kMarkSweepMarkStackLock,
+  // Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
+  kThreadWaitLock,
+  kCHALock,
+  kJitCodeCacheLock,
+  kRosAllocGlobalLock,
+  kRosAllocBracketLock,
+  kRosAllocBulkFreeLock,
+  kAllocSpaceLock,
+  kTaggingLockLevel,
+  kTransactionLogLock,
+  kCustomTlsLock,
+  kJniFunctionTableLock,
+  kJniWeakGlobalsLock,
+  kJniGlobalsLock,
+  kReferenceQueueSoftReferencesLock,
+  kReferenceQueuePhantomReferencesLock,
+  kReferenceQueueFinalizerReferencesLock,
+  kReferenceQueueWeakReferencesLock,
+  kReferenceQueueClearedReferencesLock,
+  kReferenceProcessorLock,
+  kJitDebugInterfaceLock,
+  kBumpPointerSpaceBlockLock,
+  kArenaPoolLock,
+  kInternTableLock,
+  kOatFileSecondaryLookupLock,
+  kHostDlOpenHandlesLock,
+  kVerifierDepsLock,
+  kOatFileManagerLock,
+  kTracingUniqueMethodsLock,
+  kTracingStreamingLock,
+  kClassLoaderClassesLock,
+  kDefaultMutexLevel,
+  kDexLock,
+  kMarkSweepLargeObjectLock,
+  kJdwpObjectRegistryLock,
+  kModifyLdtLock,
+  kAllocatedThreadIdsLock,
+  kMonitorPoolLock,
+  kClassLinkerClassesLock,  // TODO rename.
+  kDexToDexCompilerLock,
+  kSubtypeCheckLock,
+  kBreakpointLock,
+  kMonitorLock,
+  kMonitorListLock,
+  kJniLoadLibraryLock,
+  kThreadListLock,
+  kAllocTrackerLock,
+  kDeoptimizationLock,
+  kProfilerLock,
+  kJdwpShutdownLock,
+  kJdwpEventListLock,
+  kJdwpAttachLock,
+  kJdwpStartLock,
+  kRuntimeThreadPoolLock,
+  kRuntimeShutdownLock,
+  kTraceLock,
+  kHeapBitmapLock,
+  kMutatorLock,
+  kUserCodeSuspensionLock,
+  kInstrumentEntrypointsLock,
+  kZygoteCreationLock,
+
+  // The highest valid lock level. Use this if there is code that should only be called with no
+  // other locks held. Since this is the highest lock level we also allow it to be held even if the
+  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
+  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
+  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive.
+  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
+  // held. Since the mutator_lock_ being held strong means that all other threads are suspended this
+  // will prevent deadlocks while still allowing this lock level to function as a "highest" level.
+  kTopLockLevel,
+
+  kLockLevelCount  // Must come last.
+};
+std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+
+// For StartNoThreadSuspension and EndNoThreadSuspension.
+class CAPABILITY("role") Role {
+ public:
+  void Acquire() ACQUIRE() {}
+  void Release() RELEASE() {}
+  const Role& operator!() const { return *this; }
+};
+
+class Uninterruptible : public Role {
+};
+
+// Global mutexes corresponding to the levels above.
+class Locks {
+ public:
+  static void Init();
+  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
+
+  // Destroying various lock types can emit errors that vary depending upon
+  // whether the client (art::Runtime) is currently active.  Allow the client
+  // to set a callback that is used to check when it is acceptable to call
+  // Abort.  The default behavior is that the client *is not* able to call
+  // Abort if no callback is established.
+  using ClientCallback = bool();
+  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
+  // Checks for whether it is safe to call Abort() without using locks.
+  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
+
+  // Add a mutex to expected_mutexes_on_weak_ref_access_.
+  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
+  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
+  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
+
+  // Guards allocation entrypoint instrumenting.
+  static Mutex* instrument_entrypoints_lock_;
+
+  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
+  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
+  // only if the suspension is not due to SuspendReason::kForUserCode.
+  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
+
+  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
+  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
+  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
+  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
+  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
+  //
+  // Thread suspension:
+  // mutator thread                                | GC/Debugger
+  //   .. running ..                               |   .. running ..
+  //   .. running ..                               | Request thread suspension by:
+  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
+  //   .. running ..                               |     all mutator threads
+  //   .. running ..                               |   - releasing thread_suspend_count_lock_
+  //   .. running ..                               | Block wait for all threads to pass a barrier
+  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
+  // suspend code.                                 |   .. blocked ..
+  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
+  // x: Acquire thread_suspend_count_lock_         |   .. running ..
+  // while Thread::suspend_count_ > 0              |   .. running ..
+  //   - wait on Thread::resume_cond_              |   .. running ..
+  //     (releases thread_suspend_count_lock_)     |   .. running ..
+  //   .. waiting ..                               | Request thread resumption by:
+  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
+  //   .. waiting ..                               |     all mutator threads
+  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
+  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
+  // Release thread_suspend_count_lock_            |  .. running ..
+  // Change to kRunnable                           |  .. running ..
+  //  - this uses a CAS operation to ensure the    |  .. running ..
+  //    suspend request flag isn't raised as the   |  .. running ..
+  //    state is changed                           |  .. running ..
+  //  - if the CAS operation fails then goto x     |  .. running ..
+  //  .. running ..                                |  .. running ..
+  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
+
+  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
+  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
+
+  // Guards shutdown of the runtime.
+  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+
+  // Runtime thread pool lock.
+  static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+  // Guards background profiler global state.
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);
+
+  // Guards trace (ie traceview) requests.
+  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
+  // Guards debugger recent allocation records.
+  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+  // Guards updates to instrumentation to ensure mutual exclusion of
+  // events like deoptimization requests.
+  // TODO: improve name, perhaps instrumentation_update_lock_.
+  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
+  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
+  // This lock is used in SubtypeCheck methods which are the interface for
+  // any SubtypeCheck-mutating methods.
+  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
+  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
+  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
+  // attaching and detaching.
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
+
+  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
+  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
+
+  // Guards maintaining loading library data structures.
+  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
+  // Guards breakpoints.
+  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
+
+  // Guards lists of classes within the class linker.
+  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+
+  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to hold a higher level Mutex.
+  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
+
+  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Guard the allocation/deallocation of thread ids.
+  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
+
+  // Guards modification of the LDT on x86.
+  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
+  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
+  // Guards opened oat files in OatFileManager.
+  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
+
+  // Guards extra string entries for VerifierDeps.
+  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
+
+  // Guards dlopen_handles_ in DlOpenOatFile.
+  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
+
+  // Guards intern table.
+  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
+
+  // Guards reference processor.
+  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+  // Guards cleared references queue.
+  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+  // Guards weak references queue.
+  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+  // Guards finalizer references queue.
+  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+  // Guards phantom references queue.
+  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+  // Guards soft references queue.
+  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
+  // Guard accesses to the JNI Global Reference table.
+  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+  // Guard accesses to the JNI Weak Global Reference table.
+  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
+  // Guard accesses to the JNI function table override.
+  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
+
+  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
+  // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
+  // will not die in that case though). This is useful for (eg) the implementation of
+  // GetThreadLocalStorage.
+  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+
+  // Guards Class Hierarchy Analysis (CHA).
+  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
+  // actually only encodes the mutex being below jni_function_table_lock_ although having
+  // kGenericBottomLock level is lower than this.
+  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
+
+  // Have an exclusive aborting thread.
+  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // Allow mutual exclusion when manipulating Thread::suspend_count_.
+  // TODO: Does the trade-off of a per-thread lock make sense?
+  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
+
+  // One unexpected signal at a time lock.
+  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+
+  // Guards the magic global variables used by native tools (e.g. libunwind).
+  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+  // Have an exclusive logging thread.
+  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
+
+  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
+  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
+  // encounter an unexpected mutex on accessing weak refs,
+  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
+  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
+  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
+  class ScopedExpectedMutexesOnWeakRefAccessLock;
+};
+
+class Roles {
+ public:
+  // Uninterruptible means that the thread may not become suspended.
+  static Uninterruptible uninterruptible_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_LOCKS_H_
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 851c23f..ae7db45 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,6 @@
   size = RoundUp(size, kPageSize);
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous(name,
-                                    /* addr */ nullptr,
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
diff --git a/runtime/base/mutator_locked_dumpable.h b/runtime/base/mutator_locked_dumpable.h
index cf2199c..afbd732 100644
--- a/runtime/base/mutator_locked_dumpable.h
+++ b/runtime/base/mutator_locked_dumpable.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 #define ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 74db312..5daead9 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -91,6 +91,15 @@
     CheckUnattachedThread(level_);
     return;
   }
+  LockLevel level = level_;
+  // It would be nice to avoid this condition checking in the non-debug case,
+  // but that would make the various methods that check if a mutex is held not
+  // work properly for thread wait locks. Since the vast majority of lock
+  // acquisitions are not thread wait locks, this check should not be too
+  // expensive.
+  if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
+    level = kThreadWaitWakeLock;
+  }
   if (kDebugLocking) {
     // Check if a bad Mutex of this level or lower is held.
     bool bad_mutexes_held = false;
@@ -98,13 +107,13 @@
     // mutator_lock_ exclusive. This is because we suspending when holding locks at this level is
     // not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually
     // so there are no deadlocks.
-    if (level_ == kTopLockLevel &&
+    if (level == kTopLockLevel &&
         Locks::mutator_lock_->IsSharedHeld(self) &&
         !Locks::mutator_lock_->IsExclusiveHeld(self)) {
       LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
                   << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
                   << ") non-exclusive while locking \"" << name_ << "\" "
-                  << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level"
+                  << "(level " << level << " - " << static_cast<int>(level) << ") a top level"
                   << "mutex. This is not allowed.";
       bad_mutexes_held = true;
     } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
@@ -113,10 +122,10 @@
                  << "not allowed.";
       bad_mutexes_held = true;
     }
-    for (int i = level_; i >= 0; --i) {
+    for (int i = level; i >= 0; --i) {
       LockLevel lock_level_i = static_cast<LockLevel>(i);
       BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
-      if (level_ == kTopLockLevel &&
+      if (level == kTopLockLevel &&
           lock_level_i == kMutatorLock &&
           Locks::mutator_lock_->IsExclusiveHeld(self)) {
         // This is checked above.
@@ -125,7 +134,7 @@
         LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                    << "(level " << lock_level_i << " - " << i
                    << ") while locking \"" << name_ << "\" "
-                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
+                   << "(level " << level << " - " << static_cast<int>(level) << ")";
         if (lock_level_i > kAbortLock) {
           // Only abort in the check below if this is more than abort level lock.
           bad_mutexes_held = true;
@@ -138,8 +147,8 @@
   }
   // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
   // the monitor list.
-  if (level_ != kMonitorLock) {
-    self->SetHeldMutex(level_, this);
+  if (level != kMonitorLock) {
+    self->SetHeldMutex(level, this);
   }
 }
 
@@ -149,10 +158,17 @@
     return;
   }
   if (level_ != kMonitorLock) {
-    if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
-      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
+    auto level = level_;
+    if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) {
+      level = kThreadWaitWakeLock;
     }
-    self->SetHeldMutex(level_, nullptr);
+    if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
+      if (level == kThreadWaitWakeLock) {
+        CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr) << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;
+      }
+      CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_;
+    }
+    self->SetHeldMutex(level, nullptr);
   }
 }
 
@@ -196,7 +212,7 @@
         if (num_pending_writers_.load(std::memory_order_seq_cst) > 0 ||
             num_pending_readers_.load(std::memory_order_seq_cst) > 0) {
           // Wake any exclusive waiters as there are now no readers.
-          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+          futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
         }
       }
     } else {
@@ -214,7 +230,11 @@
   if (kDebugLocking) {
     // Sanity debug check that if we think it is locked we have it in our held mutexes.
     if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
-      CHECK_EQ(self->GetHeldMutex(level_), this);
+      if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
+        CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
+      } else {
+        CHECK_EQ(self->GetHeldMutex(level_), this);
+      }
     }
   }
   return result;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 28b2912..7aec661 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -34,51 +34,6 @@
 
 using android::base::StringPrintf;
 
-static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
-
-Mutex* Locks::abort_lock_ = nullptr;
-Mutex* Locks::alloc_tracker_lock_ = nullptr;
-Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
-Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
-ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
-ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
-Mutex* Locks::custom_tls_lock_ = nullptr;
-Mutex* Locks::deoptimization_lock_ = nullptr;
-ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
-Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::jni_function_table_lock_ = nullptr;
-Mutex* Locks::jni_libraries_lock_ = nullptr;
-Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::modify_ldt_lock_ = nullptr;
-MutatorMutex* Locks::mutator_lock_ = nullptr;
-Mutex* Locks::profiler_lock_ = nullptr;
-ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
-ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
-Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
-Mutex* Locks::reference_processor_lock_ = nullptr;
-Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
-Mutex* Locks::runtime_shutdown_lock_ = nullptr;
-Mutex* Locks::cha_lock_ = nullptr;
-Mutex* Locks::subtype_check_lock_ = nullptr;
-Mutex* Locks::thread_list_lock_ = nullptr;
-ConditionVariable* Locks::thread_exit_cond_ = nullptr;
-Mutex* Locks::thread_suspend_count_lock_ = nullptr;
-Mutex* Locks::trace_lock_ = nullptr;
-Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::user_code_suspension_lock_ = nullptr;
-Uninterruptible Roles::uninterruptible_;
-ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
-Mutex* Locks::jni_weak_globals_lock_ = nullptr;
-ReaderWriterMutex* Locks::dex_lock_ = nullptr;
-Mutex* Locks::native_debug_interface_lock_ = nullptr;
-std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
-Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
-
 struct AllMutexData {
   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
   Atomic<const BaseMutex*> all_mutexes_guard;
@@ -144,27 +99,6 @@
   const BaseMutex* const mutex_;
 };
 
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
- public:
-  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
-    for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
-                                                                                     mutex);
-         ++i) {
-      BackOff(i);
-    }
-  }
-
-  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
-    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
-              mutex_);
-    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
-  }
-
- private:
-  const BaseMutex* const mutex_;
-};
-
 // Scoped class that generates events at the beginning and end of lock contention.
 class ScopedContentionRecorder final : public ValueObject {
  public:
@@ -173,15 +107,15 @@
         blocked_tid_(kLogLockContentions ? blocked_tid : 0),
         owner_tid_(kLogLockContentions ? owner_tid : 0),
         start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
-    if (ATRACE_ENABLED()) {
+    if (ATraceEnabled()) {
       std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                      mutex->GetName(), owner_tid);
-      ATRACE_BEGIN(msg.c_str());
+      ATraceBegin(msg.c_str());
     }
   }
 
   ~ScopedContentionRecorder() {
-    ATRACE_END();
+    ATraceEnd();
     if (kLogLockContentions) {
       uint64_t end_nano_time = NanoTime();
       mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
@@ -227,18 +161,15 @@
       // No mutexes have been created yet during at startup.
       return;
     }
-    typedef std::set<BaseMutex*>::const_iterator It;
     os << "(Contended)\n";
-    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
-      BaseMutex* mutex = *it;
+    for (const BaseMutex* mutex : *all_mutexes) {
       if (mutex->HasEverContended()) {
         mutex->Dump(os);
         os << "\n";
       }
     }
     os << "(Never contented)\n";
-    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
-      BaseMutex* mutex = *it;
+    for (const BaseMutex* mutex : *all_mutexes) {
       if (!mutex->HasEverContended()) {
         mutex->Dump(os);
         os << "\n";
@@ -450,7 +381,7 @@
         if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
           self->CheckEmptyCheckpointFromMutex();
         }
-        if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
+        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0) != 0) {
           // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
           // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
           if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -554,7 +485,7 @@
         if (LIKELY(done)) {  // Spurious fail?
           // Wake a contender.
           if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
-            futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
+            futex(state_.Address(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
           }
         }
       } else {
@@ -597,7 +528,7 @@
   // Wake up all the waiters so they will respond to the emtpy checkpoint.
   DCHECK(should_respond_to_empty_checkpoint_request_);
   if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
-    futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+    futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
   }
 #else
   LOG(FATAL) << "Non futex case isn't supported.";
@@ -651,7 +582,7 @@
       if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
         self->CheckEmptyCheckpointFromMutex();
       }
-      if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
+      if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
         // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
         // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
         if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -692,7 +623,7 @@
         // Wake any waiters.
         if (UNLIKELY(num_pending_readers_.load(std::memory_order_seq_cst) > 0 ||
                      num_pending_writers_.load(std::memory_order_seq_cst) > 0)) {
-          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+          futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
         }
       }
     } else {
@@ -730,7 +661,7 @@
       if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
         self->CheckEmptyCheckpointFromMutex();
       }
-      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
+      if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
         if (errno == ETIMEDOUT) {
           --num_pending_writers_;
           return false;  // Timed out.
@@ -771,7 +702,7 @@
   if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
     self->CheckEmptyCheckpointFromMutex();
   }
-  if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
+  if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
     if (errno != EAGAIN && errno != EINTR) {
       PLOG(FATAL) << "futex wait failed for " << name_;
     }
@@ -849,7 +780,7 @@
   DCHECK(should_respond_to_empty_checkpoint_request_);
   if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 ||
                num_pending_writers_.load(std::memory_order_relaxed) > 0)) {
-    futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+    futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
   }
 #else
   LOG(FATAL) << "Non futex case isn't supported.";
@@ -898,40 +829,37 @@
   // guard_.AssertExclusiveHeld(self);
   DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
 #if ART_USE_FUTEXES
-  if (num_waiters_ > 0) {
-    sequence_++;  // Indicate the broadcast occurred.
-    bool done = false;
-    do {
-      int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
-      // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high ensuring
-      // mutex unlocks will awaken the requeued waiter thread.
-      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
-                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
-                   guard_.state_.Address(), cur_sequence) != -1;
-      if (!done) {
-        if (errno != EAGAIN && errno != EINTR) {
-          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
-        }
-      }
-    } while (!done);
-  }
+  RequeueWaiters(std::numeric_limits<int32_t>::max());
 #else
   CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
 #endif
 }
 
+#if ART_USE_FUTEXES
+void ConditionVariable::RequeueWaiters(int32_t count) {
+  if (num_waiters_ > 0) {
+    sequence_++;  // Indicate a signal or broadcast occurred.
+    // Move waiters from the condition variable's futex to the guard's futex,
+    // so that they will be woken up when the mutex is released.
+    bool done = futex(sequence_.Address(),
+                      FUTEX_REQUEUE_PRIVATE,
+                      /* Threads to wake */ 0,
+                      /* Threads to requeue */ reinterpret_cast<const timespec*>(count),
+                      guard_.state_.Address(),
+                      0) != -1;
+    if (!done && errno != EAGAIN && errno != EINTR) {
+      PLOG(FATAL) << "futex requeue failed for " << name_;
+    }
+  }
+}
+#endif
+
+
 void ConditionVariable::Signal(Thread* self) {
   DCHECK(self == nullptr || self == Thread::Current());
   guard_.AssertExclusiveHeld(self);
 #if ART_USE_FUTEXES
-  if (num_waiters_ > 0) {
-    sequence_++;  // Indicate a signal occurred.
-    // Futex wake 1 waiter who will then come and in contend on mutex. It'd be nice to requeue them
-    // to avoid this, however, requeueing can only move all waiters.
-    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
-    // Check something was woken or else we changed sequence_ before they had chance to wait.
-    CHECK((num_woken == 0) || (num_woken == 1));
-  }
+  RequeueWaiters(1);
 #else
   CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
 #endif
@@ -953,7 +881,7 @@
   guard_.recursion_count_ = 1;
   int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
   guard_.ExclusiveUnlock(self);
-  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
+  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
     // Futex failed, check it is an expected error.
     // EAGAIN == EWOULDBLK, so we let the caller try again.
     // EINTR implies a signal was sent to this thread.
@@ -1004,7 +932,7 @@
   guard_.recursion_count_ = 1;
   int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
   guard_.ExclusiveUnlock(self);
-  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
+  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
     if (errno == ETIMEDOUT) {
       // Timed out we're done.
       timed_out = true;
@@ -1031,7 +959,11 @@
   guard_.recursion_count_ = 0;
   timespec ts;
   InitTimeSpec(true, clock, ms, ns, &ts);
-  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
+  int rc;
+  while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
+    continue;
+  }
+
   if (rc == ETIMEDOUT) {
     timed_out = true;
   } else if (rc != 0) {
@@ -1044,266 +976,4 @@
   return timed_out;
 }
 
-void Locks::Init() {
-  if (logging_lock_ != nullptr) {
-    // Already initialized.
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      DCHECK(modify_ldt_lock_ != nullptr);
-    } else {
-      DCHECK(modify_ldt_lock_ == nullptr);
-    }
-    DCHECK(abort_lock_ != nullptr);
-    DCHECK(alloc_tracker_lock_ != nullptr);
-    DCHECK(allocated_monitor_ids_lock_ != nullptr);
-    DCHECK(allocated_thread_ids_lock_ != nullptr);
-    DCHECK(breakpoint_lock_ != nullptr);
-    DCHECK(classlinker_classes_lock_ != nullptr);
-    DCHECK(custom_tls_lock_ != nullptr);
-    DCHECK(deoptimization_lock_ != nullptr);
-    DCHECK(heap_bitmap_lock_ != nullptr);
-    DCHECK(oat_file_manager_lock_ != nullptr);
-    DCHECK(verifier_deps_lock_ != nullptr);
-    DCHECK(host_dlopen_handles_lock_ != nullptr);
-    DCHECK(intern_table_lock_ != nullptr);
-    DCHECK(jni_function_table_lock_ != nullptr);
-    DCHECK(jni_libraries_lock_ != nullptr);
-    DCHECK(logging_lock_ != nullptr);
-    DCHECK(mutator_lock_ != nullptr);
-    DCHECK(profiler_lock_ != nullptr);
-    DCHECK(cha_lock_ != nullptr);
-    DCHECK(subtype_check_lock_ != nullptr);
-    DCHECK(thread_list_lock_ != nullptr);
-    DCHECK(thread_suspend_count_lock_ != nullptr);
-    DCHECK(trace_lock_ != nullptr);
-    DCHECK(unexpected_signal_lock_ != nullptr);
-    DCHECK(user_code_suspension_lock_ != nullptr);
-    DCHECK(dex_lock_ != nullptr);
-    DCHECK(native_debug_interface_lock_ != nullptr);
-  } else {
-    // Create global locks in level order from highest lock level to lowest.
-    LockLevel current_lock_level = kInstrumentEntrypointsLock;
-    DCHECK(instrument_entrypoints_lock_ == nullptr);
-    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
-    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
-      if ((new_level) >= current_lock_level) { \
-        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
-        fprintf(stderr, "New local level %d is not less than current level %d\n", \
-                new_level, current_lock_level); \
-        exit(1); \
-      } \
-      current_lock_level = new_level;
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
-    DCHECK(user_code_suspension_lock_ == nullptr);
-    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
-    DCHECK(mutator_lock_ == nullptr);
-    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
-    DCHECK(heap_bitmap_lock_ == nullptr);
-    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
-    DCHECK(runtime_shutdown_lock_ == nullptr);
-    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
-    DCHECK(profiler_lock_ == nullptr);
-    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
-    DCHECK(deoptimization_lock_ == nullptr);
-    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
-    DCHECK(alloc_tracker_lock_ == nullptr);
-    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
-    DCHECK(thread_list_lock_ == nullptr);
-    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
-    DCHECK(jni_libraries_lock_ == nullptr);
-    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
-    DCHECK(breakpoint_lock_ == nullptr);
-    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
-    DCHECK(subtype_check_lock_ == nullptr);
-    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
-    DCHECK(classlinker_classes_lock_ == nullptr);
-    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
-                                                      current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
-    DCHECK(allocated_monitor_ids_lock_ == nullptr);
-    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
-    DCHECK(allocated_thread_ids_lock_ == nullptr);
-    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
-
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
-      DCHECK(modify_ldt_lock_ == nullptr);
-      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
-    }
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
-    DCHECK(dex_lock_ == nullptr);
-    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
-    DCHECK(oat_file_manager_lock_ == nullptr);
-    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
-    DCHECK(verifier_deps_lock_ == nullptr);
-    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
-    DCHECK(host_dlopen_handles_lock_ == nullptr);
-    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
-    DCHECK(intern_table_lock_ == nullptr);
-    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
-    DCHECK(reference_processor_lock_ == nullptr);
-    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
-    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
-    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
-    DCHECK(reference_queue_weak_references_lock_ == nullptr);
-    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
-    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
-    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
-    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
-    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
-    DCHECK(reference_queue_soft_references_lock_ == nullptr);
-    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
-    DCHECK(jni_globals_lock_ == nullptr);
-    jni_globals_lock_ =
-        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
-    DCHECK(jni_weak_globals_lock_ == nullptr);
-    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
-    DCHECK(jni_function_table_lock_ == nullptr);
-    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
-    DCHECK(custom_tls_lock_ == nullptr);
-    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
-    DCHECK(cha_lock_ == nullptr);
-    cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
-    DCHECK(native_debug_interface_lock_ == nullptr);
-    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
-    DCHECK(abort_lock_ == nullptr);
-    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
-    DCHECK(thread_suspend_count_lock_ == nullptr);
-    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
-    DCHECK(unexpected_signal_lock_ == nullptr);
-    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
-    DCHECK(logging_lock_ == nullptr);
-    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
-
-    #undef UPDATE_CURRENT_LOCK_LEVEL
-
-    // List of mutexes that we may hold when accessing a weak ref.
-    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
-
-    InitConditions();
-  }
-}
-
-void Locks::InitConditions() {
-  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
-}
-
-void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
-  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
-}
-
-// Helper to allow checking shutdown while ignoring locking requirements.
-bool Locks::IsSafeToCallAbortRacy() {
-  Locks::ClientCallback* safe_to_call_abort_cb =
-      safe_to_call_abort_callback.load(std::memory_order_acquire);
-  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
-}
-
-void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  }
-}
-
-void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  }
-}
-
-bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
-  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-  return std::find(list.begin(), list.end(), mutex) != list.end();
-}
-
 }  // namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d127d0f..aaa1ee6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -28,8 +28,9 @@
 
 #include "base/aborting.h"
 #include "base/atomic.h"
-#include "base/globals.h"
+#include "runtime_globals.h"
 #include "base/macros.h"
+#include "locks.h"
 
 #if defined(__linux__)
 #define ART_USE_FUTEXES 1
@@ -50,100 +51,7 @@
 class SHARED_LOCKABLE MutatorMutex;
 class ScopedContentionRecorder;
 class Thread;
-class Mutex;
-
-// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
-// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
-// partial ordering and thereby cause deadlock situations to fail checks.
-//
-// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
-enum LockLevel : uint8_t {
-  kLoggingLock = 0,
-  kSwapMutexesLock,
-  kUnexpectedSignalLock,
-  kThreadSuspendCountLock,
-  kAbortLock,
-  kNativeDebugInterfaceLock,
-  kSignalHandlingLock,
-  // A generic lock level for mutexs that should not allow any additional mutexes to be gained after
-  // acquiring it.
-  kGenericBottomLock,
-  kJdwpAdbStateLock,
-  kJdwpSocketLock,
-  kRegionSpaceRegionLock,
-  kMarkSweepMarkStackLock,
-  kCHALock,
-  kJitCodeCacheLock,
-  kRosAllocGlobalLock,
-  kRosAllocBracketLock,
-  kRosAllocBulkFreeLock,
-  kTaggingLockLevel,
-  kTransactionLogLock,
-  kCustomTlsLock,
-  kJniFunctionTableLock,
-  kJniWeakGlobalsLock,
-  kJniGlobalsLock,
-  kReferenceQueueSoftReferencesLock,
-  kReferenceQueuePhantomReferencesLock,
-  kReferenceQueueFinalizerReferencesLock,
-  kReferenceQueueWeakReferencesLock,
-  kReferenceQueueClearedReferencesLock,
-  kReferenceProcessorLock,
-  kJitDebugInterfaceLock,
-  kAllocSpaceLock,
-  kBumpPointerSpaceBlockLock,
-  kArenaPoolLock,
-  kInternTableLock,
-  kOatFileSecondaryLookupLock,
-  kHostDlOpenHandlesLock,
-  kVerifierDepsLock,
-  kOatFileManagerLock,
-  kTracingUniqueMethodsLock,
-  kTracingStreamingLock,
-  kClassLoaderClassesLock,
-  kDefaultMutexLevel,
-  kDexLock,
-  kMarkSweepLargeObjectLock,
-  kJdwpObjectRegistryLock,
-  kModifyLdtLock,
-  kAllocatedThreadIdsLock,
-  kMonitorPoolLock,
-  kClassLinkerClassesLock,  // TODO rename.
-  kDexToDexCompilerLock,
-  kSubtypeCheckLock,
-  kBreakpointLock,
-  kMonitorLock,
-  kMonitorListLock,
-  kJniLoadLibraryLock,
-  kThreadListLock,
-  kAllocTrackerLock,
-  kDeoptimizationLock,
-  kProfilerLock,
-  kJdwpShutdownLock,
-  kJdwpEventListLock,
-  kJdwpAttachLock,
-  kJdwpStartLock,
-  kRuntimeShutdownLock,
-  kTraceLock,
-  kHeapBitmapLock,
-  kMutatorLock,
-  kUserCodeSuspensionLock,
-  kInstrumentEntrypointsLock,
-  kZygoteCreationLock,
-
-  // The highest valid lock level. Use this if there is code that should only be called with no
-  // other locks held. Since this is the highest lock level we also allow it to be held even if the
-  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
-  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
-  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive.
-  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
-  // held. Since the mutator_lock_ being held strong means that all other threads are suspended this
-  // will prevent deadlocks while still allowing this lock level to function as a "highest" level.
-  kTopLockLevel,
-
-  kLockLevelCount  // Must come last.
-};
-std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+class LOCKABLE Mutex;
 
 constexpr bool kDebugLocking = kIsDebugBuild;
 
@@ -480,7 +388,9 @@
   ConditionVariable(const char* name, Mutex& mutex);
   ~ConditionVariable();
 
+  // Requires the mutex to be held.
   void Broadcast(Thread* self);
+  // Requires the mutex to be held.
   void Signal(Thread* self);
   // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
   //       pointer copy, thereby defeating annotalysis.
@@ -505,6 +415,8 @@
   // Number of threads that have come into to wait, not the length of the waiters on the futex as
   // waiters may have been requeued onto guard_. Guarded by guard_.
   volatile int32_t num_waiters_;
+
+  void RequeueWaiters(int32_t count);
 #else
   pthread_cond_t cond_;
 #endif
@@ -562,226 +474,6 @@
   DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
 };
 
-// For StartNoThreadSuspension and EndNoThreadSuspension.
-class CAPABILITY("role") Role {
- public:
-  void Acquire() ACQUIRE() {}
-  void Release() RELEASE() {}
-  const Role& operator!() const { return *this; }
-};
-
-class Uninterruptible : public Role {
-};
-
-// Global mutexes corresponding to the levels above.
-class Locks {
- public:
-  static void Init();
-  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
-
-  // Destroying various lock types can emit errors that vary depending upon
-  // whether the client (art::Runtime) is currently active.  Allow the client
-  // to set a callback that is used to check when it is acceptable to call
-  // Abort.  The default behavior is that the client *is not* able to call
-  // Abort if no callback is established.
-  using ClientCallback = bool();
-  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
-  // Checks for whether it is safe to call Abort() without using locks.
-  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
-
-  // Add a mutex to expected_mutexes_on_weak_ref_access_.
-  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
-  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
-  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
-
-  // Guards allocation entrypoint instrumenting.
-  static Mutex* instrument_entrypoints_lock_;
-
-  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
-  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
-  // only if the suspension is not due to SuspendReason::kForUserCode.
-  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
-
-  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
-  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
-  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
-  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
-  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
-  //
-  // Thread suspension:
-  // mutator thread                                | GC/Debugger
-  //   .. running ..                               |   .. running ..
-  //   .. running ..                               | Request thread suspension by:
-  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
-  //   .. running ..                               |     all mutator threads
-  //   .. running ..                               |   - releasing thread_suspend_count_lock_
-  //   .. running ..                               | Block wait for all threads to pass a barrier
-  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
-  // suspend code.                                 |   .. blocked ..
-  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
-  // x: Acquire thread_suspend_count_lock_         |   .. running ..
-  // while Thread::suspend_count_ > 0              |   .. running ..
-  //   - wait on Thread::resume_cond_              |   .. running ..
-  //     (releases thread_suspend_count_lock_)     |   .. running ..
-  //   .. waiting ..                               | Request thread resumption by:
-  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
-  //   .. waiting ..                               |     all mutator threads
-  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
-  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
-  // Release thread_suspend_count_lock_            |  .. running ..
-  // Change to kRunnable                           |  .. running ..
-  //  - this uses a CAS operation to ensure the    |  .. running ..
-  //    suspend request flag isn't raised as the   |  .. running ..
-  //    state is changed                           |  .. running ..
-  //  - if the CAS operation fails then goto x     |  .. running ..
-  //  .. running ..                                |  .. running ..
-  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
-
-  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
-  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
-
-  // Guards shutdown of the runtime.
-  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
-
-  // Guards background profiler global state.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
-
-  // Guards trace (ie traceview) requests.
-  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
-
-  // Guards debugger recent allocation records.
-  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
-
-  // Guards updates to instrumentation to ensure mutual exclusion of
-  // events like deoptimization requests.
-  // TODO: improve name, perhaps instrumentation_update_lock_.
-  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
-
-  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
-  // This lock is used in SubtypeCheck methods which are the interface for
-  // any SubtypeCheck-mutating methods.
-  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
-  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
-  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
-  // attaching and detaching.
-  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
-
-  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
-  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
-
-  // Guards maintaining loading library data structures.
-  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
-
-  // Guards breakpoints.
-  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
-
-  // Guards lists of classes within the class linker.
-  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
-  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to hold a higher level Mutex.
-  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
-
-  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
-
-  // Guard the allocation/deallocation of thread ids.
-  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
-
-  // Guards modification of the LDT on x86.
-  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
-
-  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
-
-  // Guards opened oat files in OatFileManager.
-  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
-
-  // Guards extra string entries for VerifierDeps.
-  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
-
-  // Guards dlopen_handles_ in DlOpenOatFile.
-  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
-
-  // Guards intern table.
-  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
-
-  // Guards reference processor.
-  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
-
-  // Guards cleared references queue.
-  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
-
-  // Guards weak references queue.
-  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
-
-  // Guards finalizer references queue.
-  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
-
-  // Guards phantom references queue.
-  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
-
-  // Guards soft references queue.
-  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
-
-  // Guard accesses to the JNI Global Reference table.
-  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
-
-  // Guard accesses to the JNI Weak Global Reference table.
-  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
-
-  // Guard accesses to the JNI function table override.
-  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
-
-  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
-  // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
-  // will not die in that case though). This is useful for (eg) the implementation of
-  // GetThreadLocalStorage.
-  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
-
-  // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
-  // actually only encodes the mutex being below jni_function_table_lock_ although having
-  // kGenericBottomLock level is lower than this.
-  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
-
-  // Have an exclusive aborting thread.
-  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // Allow mutual exclusion when manipulating Thread::suspend_count_.
-  // TODO: Does the trade-off of a per-thread lock make sense?
-  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
-
-  // One unexpected signal at a time lock.
-  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
-
-  // Guards the magic global variables used by native tools (e.g. libunwind).
-  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
-  // Have an exclusive logging thread.
-  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
-
-  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
-  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
-  // encounter an unexpected mutex on accessing weak refs,
-  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
-  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
-  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
-  class ScopedExpectedMutexesOnWeakRefAccessLock;
-};
-
-class Roles {
- public:
-  // Uninterruptible means that the thread may not become suspended.
-  static Uninterruptible uninterruptible_;
-};
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_MUTEX_H_
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 23ec3e1..bd39192 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -21,6 +21,7 @@
 #include <android-base/logging.h>
 
 #include "base/histogram-inl.h"
+#include "base/mutex.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
@@ -40,7 +41,7 @@
 CumulativeLogger::CumulativeLogger(const std::string& name)
     : name_(name),
       lock_name_("CumulativeLoggerLock" + name),
-      lock_(lock_name_.c_str(), kDefaultMutexLevel, true) {
+      lock_(new Mutex(lock_name_.c_str(), kDefaultMutexLevel, true)) {
   Reset();
 }
 
@@ -49,7 +50,7 @@
 }
 
 void CumulativeLogger::SetName(const std::string& name) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   name_.assign(name);
 }
 
@@ -57,19 +58,19 @@
 }
 
 void CumulativeLogger::End() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   ++iterations_;
 }
 
 void CumulativeLogger::Reset() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   iterations_ = 0;
   total_time_ = 0;
   STLDeleteElements(&histograms_);
 }
 
 void CumulativeLogger::AddLogger(const TimingLogger &logger) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   TimingLogger::TimingData timing_data(logger.CalculateTimingData());
   const std::vector<TimingLogger::Timing>& timings = logger.GetTimings();
   for (size_t i = 0; i < timings.size(); ++i) {
@@ -81,12 +82,12 @@
 }
 
 size_t CumulativeLogger::GetIterations() const {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   return iterations_;
 }
 
 void CumulativeLogger::Dump(std::ostream &os) const {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   DumpHistogram(os);
 }
 
@@ -143,12 +144,12 @@
 void TimingLogger::StartTiming(const char* label) {
   DCHECK(label != nullptr);
   timings_.push_back(Timing(kind_, label));
-  ATRACE_BEGIN(label);
+  ATraceBegin(label);
 }
 
 void TimingLogger::EndTiming() {
   timings_.push_back(Timing(kind_, nullptr));
-  ATRACE_END();
+  ATraceEnd();
 }
 
 uint64_t TimingLogger::GetTotalNs() const {
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index a8a6701..974a14d 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -18,10 +18,11 @@
 #define ART_RUNTIME_BASE_TIMING_LOGGER_H_
 
 #include "base/histogram.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/time_utils.h"
 
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -34,17 +35,17 @@
   explicit CumulativeLogger(const std::string& name);
   ~CumulativeLogger();
   void Start();
-  void End() REQUIRES(!lock_);
-  void Reset() REQUIRES(!lock_);
-  void Dump(std::ostream& os) const REQUIRES(!lock_);
+  void End() REQUIRES(!GetLock());
+  void Reset() REQUIRES(!GetLock());
+  void Dump(std::ostream& os) const REQUIRES(!GetLock());
   uint64_t GetTotalNs() const {
     return GetTotalTime() * kAdjust;
   }
   // Allow the name to be modified, particularly when the cumulative logger is a field within a
   // parent class that is unable to determine the "name" of a sub-class.
-  void SetName(const std::string& name) REQUIRES(!lock_);
-  void AddLogger(const TimingLogger& logger) REQUIRES(!lock_);
-  size_t GetIterations() const REQUIRES(!lock_);
+  void SetName(const std::string& name) REQUIRES(!GetLock());
+  void AddLogger(const TimingLogger& logger) REQUIRES(!GetLock());
+  size_t GetIterations() const REQUIRES(!GetLock());
 
  private:
   class HistogramComparator {
@@ -58,18 +59,22 @@
   static constexpr size_t kDefaultBucketCount = 100;
   static constexpr size_t kInitialBucketSize = 50;  // 50 microseconds.
 
-  void AddPair(const std::string &label, uint64_t delta_time)
-      REQUIRES(lock_);
-  void DumpHistogram(std::ostream &os) const REQUIRES(lock_);
+  void AddPair(const std::string &label, uint64_t delta_time) REQUIRES(GetLock());
+  void DumpHistogram(std::ostream &os) const REQUIRES(GetLock());
   uint64_t GetTotalTime() const {
     return total_time_;
   }
+
+  Mutex* GetLock() const {
+    return lock_.get();
+  }
+
   static const uint64_t kAdjust = 1000;
-  std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(lock_);
+  std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(GetLock());
   std::string name_;
   const std::string lock_name_;
-  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  size_t iterations_ GUARDED_BY(lock_);
+  mutable std::unique_ptr<Mutex> lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  size_t iterations_ GUARDED_BY(GetLock());
   uint64_t total_time_;
 
   DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 3ea920d..5110b7a 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/logging.h"  // For VLOG
+#include "base/mutex.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "linear_alloc.h"
@@ -115,7 +116,7 @@
   // if they have SingleImplementations methods defined by 'klass'.
   // Skip all virtual methods that do not override methods from super class since they cannot be
   // SingleImplementations for anything.
-  int32_t vtbl_size = super->GetVTableLength<kDefaultVerifyFlags, kWithoutReadBarrier>();
+  int32_t vtbl_size = super->GetVTableLength<kDefaultVerifyFlags>();
   ObjPtr<mirror::ClassLoader> loader =
       klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
   for (int vtbl_index = 0; vtbl_index < vtbl_size; ++vtbl_index) {
@@ -131,7 +132,7 @@
     // so start with a superclass and move up looking into a corresponding vtbl slot.
     for (ObjPtr<mirror::Class> super_it = super;
          super_it != nullptr &&
-             super_it->GetVTableLength<kDefaultVerifyFlags, kWithoutReadBarrier>() > vtbl_index;
+             super_it->GetVTableLength<kDefaultVerifyFlags>() > vtbl_index;
          super_it = super_it->GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
       // Skip superclasses that are also going to be unloaded.
       ObjPtr<mirror::ClassLoader> super_loader = super_it->
@@ -158,7 +159,7 @@
 
   // Check all possible interface methods too.
   ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kDefaultVerifyFlags, kWithoutReadBarrier>();
-  const size_t ifcount = klass->GetIfTableCount<kDefaultVerifyFlags, kWithoutReadBarrier>();
+  const size_t ifcount = klass->GetIfTableCount<kDefaultVerifyFlags>();
   for (size_t i = 0; i < ifcount; ++i) {
     ObjPtr<mirror::Class> interface =
         iftable->GetInterface<kDefaultVerifyFlags, kWithoutReadBarrier>(i);
@@ -277,7 +278,7 @@
 };
 
 
-static void VerifyNonSingleImplementation(mirror::Class* verify_class,
+static void VerifyNonSingleImplementation(ObjPtr<mirror::Class> verify_class,
                                           uint16_t verify_index,
                                           ArtMethod* excluded_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -291,7 +292,7 @@
   PointerSize image_pointer_size =
       Runtime::Current()->GetClassLinker()->GetImagePointerSize();
 
-  mirror::Class* input_verify_class = verify_class;
+  ObjPtr<mirror::Class> input_verify_class = verify_class;
 
   while (verify_class != nullptr) {
     if (verify_index >= verify_class->GetVTableLength()) {
@@ -299,7 +300,7 @@
     }
     ArtMethod* verify_method = verify_class->GetVTableEntry(verify_index, image_pointer_size);
     if (verify_method != excluded_method) {
-      auto construct_parent_chain = [](mirror::Class* failed, mirror::Class* in)
+      auto construct_parent_chain = [](ObjPtr<mirror::Class> failed, ObjPtr<mirror::Class> in)
           REQUIRES_SHARED(Locks::mutator_lock_) {
         std::string tmp = in->PrettyClass();
         while (in != failed) {
@@ -363,7 +364,7 @@
     // non-single-implementation already.
     VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
                                   method_in_super->GetMethodIndex(),
-                                  nullptr /* excluded_method */);
+                                  /* excluded_method= */ nullptr);
     return;
   }
 
@@ -432,7 +433,7 @@
 
     // method_in_super might be the single-implementation of another abstract method,
     // which should be also invalidated of its single-implementation status.
-    mirror::Class* super_super = klass->GetSuperClass()->GetSuperClass();
+    ObjPtr<mirror::Class> super_super = klass->GetSuperClass()->GetSuperClass();
     while (super_super != nullptr &&
            method_index < super_super->GetVTableLength()) {
       ArtMethod* method_in_super_super = super_super->GetVTableEntry(method_index, pointer_size);
@@ -507,7 +508,8 @@
     return;
   }
   DCHECK(!single_impl->IsAbstract());
-  if (single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) {
+  if ((single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) &&
+      !implementation_method->IsDefaultConflicting()) {
     // Same implementation. Since implementation_method may be a copy of a default
     // method, we need to check the declaring class for equality.
     return;
@@ -543,7 +545,10 @@
       method->SetHasSingleImplementation(true);
       DCHECK(method->GetSingleImplementation(pointer_size) == nullptr);
     }
-  } else {
+  // Default conflicting methods cannot be treated with single implementations,
+  // as we need to call them (and not inline them) in case of ICCE.
+  // See class_linker.cc:EnsureThrowsInvocationError.
+  } else if (!method->IsDefaultConflicting()) {
     method->SetHasSingleImplementation(true);
     // Single implementation of non-abstract method is itself.
     DCHECK_EQ(method->GetSingleImplementation(pointer_size), method);
@@ -560,7 +565,7 @@
     return;
   }
 
-  mirror::Class* super_class = klass->GetSuperClass();
+  ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
   if (super_class == nullptr) {
     return;
   }
diff --git a/runtime/cha.h b/runtime/cha.h
index d1a1b7c..a07ee91 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -21,7 +21,7 @@
 #include <unordered_set>
 
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle.h"
 #include "mirror/class.h"
 #include "oat_quick_method_header.h"
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2536b23..978b1ab 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -21,8 +21,10 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/mutex.h"
 #include "class_linker.h"
-#include "gc/heap-inl.h"
+#include "dex/dex_file.h"
+#include "dex/dex_file_structs.h"
 #include "gc_root-inl.h"
 #include "handle_scope-inl.h"
 #include "mirror/class_loader.h"
@@ -136,7 +138,7 @@
   ObjPtr<mirror::Class> resolved_type =
       referrer->GetDexCache<kWithoutReadBarrier>()->GetResolvedType(type_idx);
   if (UNLIKELY(resolved_type == nullptr)) {
-    resolved_type = DoResolveType(type_idx, referrer->GetDeclaringClass());
+    resolved_type = DoResolveType(type_idx, referrer);
   }
   return resolved_type;
 }
@@ -150,7 +152,7 @@
   ObjPtr<mirror::Class> resolved_type =
       referrer->GetDexCache<kWithoutReadBarrier>()->GetResolvedType(type_idx);
   if (UNLIKELY(resolved_type == nullptr)) {
-    resolved_type = DoResolveType(type_idx, referrer->GetDeclaringClass());
+    resolved_type = DoResolveType(type_idx, referrer);
   }
   return resolved_type;
 }
@@ -271,7 +273,7 @@
       dex_cache,
       type,
       [this, dex_cache, method_idx, class_loader]() REQUIRES_SHARED(Locks::mutator_lock_) {
-        const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
+        const dex::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
         ObjPtr<mirror::Class> klass =
             LookupResolvedType(method_id.class_idx_, dex_cache, class_loader);
         DCHECK(klass != nullptr);
@@ -286,7 +288,7 @@
   ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, pointer_size);
   if (resolved == nullptr) {
     const DexFile& dex_file = *dex_cache->GetDexFile();
-    const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+    const dex::MethodId& method_id = dex_file.GetMethodId(method_idx);
     ObjPtr<mirror::Class> klass = LookupResolvedType(method_id.class_idx_, dex_cache, class_loader);
     if (klass != nullptr) {
       resolved = FindResolvedMethod(klass, dex_cache, class_loader, method_idx);
@@ -315,7 +317,7 @@
     // Check if the invoke type matches the class type.
     ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
     ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
-    if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) {
+    if (CheckInvokeClassMismatch</* kThrow= */ false>(dex_cache, type, method_idx, class_loader)) {
       return nullptr;
     }
     // Check access.
@@ -366,7 +368,7 @@
     // Check if the invoke type matches the class type.
     ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
     ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
-    if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) {
+    if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) {
       DCHECK(Thread::Current()->IsExceptionPending());
       return nullptr;
     }
@@ -439,6 +441,14 @@
   }
 }
 
+template <ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<mirror::ObjectArray<mirror::Class>> ClassLinker::GetClassRoots() {
+  ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
+      class_roots_.Read<kReadBarrierOption>();
+  DCHECK(class_roots != nullptr);
+  return class_roots;
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 511d468..d29a6b7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -61,6 +61,7 @@
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_exception_helpers.h"
 #include "dex/dex_file_loader.h"
+#include "dex/signature-inl.h"
 #include "dex/utf.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
@@ -79,7 +80,7 @@
 #include "image-inl.h"
 #include "imt_conflict_table.h"
 #include "imtable-inl.h"
-#include "intern_table.h"
+#include "intern_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "jit/debugger_interface.h"
 #include "jit/jit.h"
@@ -87,7 +88,10 @@
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
 #include "linear_alloc.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/array-inl.h"
 #include "mirror/call_site.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class.h"
 #include "mirror/class_ext.h"
@@ -103,7 +107,10 @@
 #include "mirror/method_type.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/object_reference.h"
+#include "mirror/object_reference-inl.h"
 #include "mirror/proxy.h"
 #include "mirror/reference-inl.h"
 #include "mirror/stack_trace_element.h"
@@ -206,7 +213,25 @@
   self->AssertPendingException();
 }
 
-void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def) {
+// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+// before.
+template <bool kNeedsVerified = false>
+static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kNeedsVerified) {
+    // To not fail access-flags access checks, push a minimal state.
+    mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current());
+  }
+  if (!klass->WasVerificationAttempted()) {
+    klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
+    klass->SetVerificationAttempted();
+  }
+}
+
+void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c,
+                                           bool wrap_in_no_class_def,
+                                           bool log) {
   // The class failed to initialize on a previous attempt, so we want to throw
   // a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
   // failed in verification, in which case v2 5.4.1 says we need to re-throw
@@ -222,8 +247,10 @@
         extra = verify_error->AsThrowable()->Dump();
       }
     }
-    LOG(INFO) << "Rejecting re-init on previously-failed class " << c->PrettyClass()
-              << ": " << extra;
+    if (log) {
+      LOG(INFO) << "Rejecting re-init on previously-failed class " << c->PrettyClass()
+                << ": " << extra;
+    }
   }
 
   CHECK(c->IsErroneous()) << c->PrettyClass() << " " << c->GetStatus();
@@ -307,7 +334,7 @@
     return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
   }
 };
-typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
+using FieldGaps = std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator>;
 
 // Adds largest aligned gaps to queue of gaps.
 static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
@@ -431,6 +458,8 @@
   heap->IncrementDisableMovingGC(self);
   StackHandleScope<64> hs(self);  // 64 is picked arbitrarily.
   auto class_class_size = mirror::Class::ClassClassSize(image_pointer_size_);
+  // Allocate the object as non-movable so that there are no cases where Object::IsClass returns
+  // the incorrect result when comparing to-space vs from-space.
   Handle<mirror::Class> java_lang_Class(hs.NewHandle(ObjPtr<mirror::Class>::DownCast(MakeObjPtr(
       heap->AllocNonMovableObject<true>(self, nullptr, class_class_size, VoidFunctor())))));
   CHECK(java_lang_Class != nullptr);
@@ -492,7 +521,7 @@
   // Space (LOS) -- see the comment about the dirty card scanning logic in
   // art::gc::collector::ConcurrentCopying::MarkingPhase.
   Handle<mirror::Class> java_lang_String(hs.NewHandle(
-      AllocClass</* kMovable */ false>(
+      AllocClass</* kMovable= */ false>(
           self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
   java_lang_String->SetStringClass();
   mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self);
@@ -790,6 +819,8 @@
                FindSystemClass(self, "Ljava/lang/StackTraceElement;"));
   SetClassRoot(ClassRoot::kJavaLangStackTraceElementArrayClass,
                FindSystemClass(self, "[Ljava/lang/StackTraceElement;"));
+  SetClassRoot(ClassRoot::kJavaLangClassLoaderArrayClass,
+               FindSystemClass(self, "[Ljava/lang/ClassLoader;"));
 
   // Create conflict tables that depend on the class linker.
   runtime->FixupConflictTables();
@@ -957,9 +988,6 @@
       runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
   DCHECK(!oat_files.empty());
   const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
-  const char* image_file_location = oat_files[0]->GetOatHeader().
-      GetStoreValueByKey(OatHeader::kImageLocationKey);
-  CHECK(image_file_location == nullptr || *image_file_location == 0);
   quick_resolution_trampoline_ = default_oat_header.GetQuickResolutionTrampoline();
   quick_imt_conflict_trampoline_ = default_oat_header.GetQuickImtConflictTrampoline();
   quick_generic_jni_trampoline_ = default_oat_header.GetQuickGenericJniTrampoline();
@@ -1030,13 +1058,15 @@
   runtime->SetSentinel(heap->AllocNonMovableObject<true>(
       self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
 
-  for (gc::space::ImageSpace* image_space : spaces) {
+  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+  CHECK_LE(spaces.size(), boot_class_path_locations.size());
+  for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
     // Boot class loader, use a null handle.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
-    if (!AddImageSpace(image_space,
+    if (!AddImageSpace(spaces[i],
                        ScopedNullHandle<mirror::ClassLoader>(),
-                       /*dex_elements*/nullptr,
-                       /*dex_location*/nullptr,
+                       /*dex_elements=*/ nullptr,
+                       /*dex_location=*/ boot_class_path_locations[i].c_str(),
                        /*out*/&dex_files,
                        error_msg)) {
       return false;
@@ -1055,6 +1085,15 @@
   return true;
 }
 
+void ClassLinker::AddExtraBootDexFiles(
+    Thread* self,
+    std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) {
+  for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) {
+    AppendToBootClassPath(self, *dex_file);
+    boot_dex_files_.push_back(std::move(dex_file));
+  }
+}
+
 bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                     ObjPtr<mirror::ClassLoader> class_loader) {
   return class_loader == nullptr ||
@@ -1087,48 +1126,165 @@
   return false;
 }
 
-static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
-                                   std::list<ObjPtr<mirror::String>>* out_dex_file_names,
-                                   std::string* error_msg)
+static bool GetDexFileNames(ScopedObjectAccessUnchecked& soa,
+                            ObjPtr<mirror::ClassLoader> class_loader,
+                            /*out*/std::list<ObjPtr<mirror::String>>* dex_files,
+                            /*out*/std::string* error_msg)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(out_dex_file_names != nullptr);
-  DCHECK(error_msg != nullptr);
-  ScopedObjectAccessUnchecked soa(Thread::Current());
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::ClassLoader> handle(hs.NewHandle(class_loader));
-  while (!ClassLinker::IsBootClassLoader(soa, class_loader)) {
-    if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
-        class_loader->GetClass()) {
-      *error_msg = StringPrintf("Unknown class loader type %s",
-                                class_loader->PrettyTypeOf().c_str());
-      // Unsupported class loader.
+  // Get element names. Sets error to true on failure.
+  auto add_element_names = [&](ObjPtr<mirror::Object> element, bool* error)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (element == nullptr) {
+      *error_msg = "Null dex element";
+      *error = true;  // Null element is a critical error.
+      return false;   // Had an error, stop the visit.
+    }
+    ObjPtr<mirror::String> name;
+    if (!GetDexPathListElementName(element, &name)) {
+      *error_msg = "Invalid dex path list element";
+      *error = true;   // Invalid element, make it a critical error.
+      return false;    // Stop the visit.
+    }
+    if (name != nullptr) {
+      dex_files->push_front(name);
+    }
+    return true;  // Continue with the next Element.
+  };
+  bool error = VisitClassLoaderDexElements(soa,
+                                           handle,
+                                           add_element_names,
+                                           /*defaultReturn=*/ false);
+  return !error;
+}
+
+static bool CompareClassLoaderTypes(ScopedObjectAccessUnchecked& soa,
+                                    ObjPtr<mirror::ClassLoader> image_class_loader,
+                                    ObjPtr<mirror::ClassLoader> class_loader,
+                                    std::string* error_msg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
+    if (!ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
+      *error_msg = "Hierarchies don't match";
       return false;
     }
-    // Get element names. Sets error to true on failure.
-    auto add_element_names = [&](ObjPtr<mirror::Object> element, bool* error)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (element == nullptr) {
-        *error_msg = "Null dex element";
-        *error = true;  // Null element is a critical error.
-        return false;   // Had an error, stop the visit.
-      }
-      ObjPtr<mirror::String> name;
-      if (!GetDexPathListElementName(element, &name)) {
-        *error_msg = "Invalid dex path list element";
-        *error = false;  // Invalid element is not a critical error.
-        return false;    // Stop the visit.
-      }
-      if (name != nullptr) {
-        out_dex_file_names->push_front(name);
-      }
-      return true;  // Continue with the next Element.
-    };
-    bool error = VisitClassLoaderDexElements(soa, handle, add_element_names, /* error */ false);
-    if (error) {
-      // An error occurred during DexPathList Element visiting.
+  } else if (ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
+    *error_msg = "Hierarchies don't match";
+    return false;
+  } else if (class_loader->GetClass() != image_class_loader->GetClass()) {
+    *error_msg = StringPrintf("Class loader types don't match %s and %s",
+                              image_class_loader->PrettyTypeOf().c_str(),
+                              class_loader->PrettyTypeOf().c_str());
+    return false;
+  } else if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
+      class_loader->GetClass()) {
+    *error_msg = StringPrintf("Unknown class loader type %s",
+                              class_loader->PrettyTypeOf().c_str());
+    // Unsupported class loader.
+    return false;
+  }
+  return true;
+}
+
+static bool CompareDexFiles(const std::list<ObjPtr<mirror::String>>& image_dex_files,
+                            const std::list<ObjPtr<mirror::String>>& loader_dex_files,
+                            std::string* error_msg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool equal = (image_dex_files.size() == loader_dex_files.size()) &&
+      std::equal(image_dex_files.begin(),
+                 image_dex_files.end(),
+                 loader_dex_files.begin(),
+                 [](ObjPtr<mirror::String> lhs, ObjPtr<mirror::String> rhs)
+                     REQUIRES_SHARED(Locks::mutator_lock_) {
+                   return lhs->Equals(rhs);
+                 });
+  if (!equal) {
+    VLOG(image) << "Image dex files " << image_dex_files.size();
+    for (ObjPtr<mirror::String> name : image_dex_files) {
+      VLOG(image) << name->ToModifiedUtf8();
+    }
+    VLOG(image) << "Loader dex files " << loader_dex_files.size();
+    for (ObjPtr<mirror::String> name : loader_dex_files) {
+      VLOG(image) << name->ToModifiedUtf8();
+    }
+    *error_msg = "Mismatch in dex files";
+  }
+  return equal;
+}
+
+static bool CompareClassLoaders(ScopedObjectAccessUnchecked& soa,
+                                ObjPtr<mirror::ClassLoader> image_class_loader,
+                                ObjPtr<mirror::ClassLoader> class_loader,
+                                bool check_dex_file_names,
+                                std::string* error_msg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (!CompareClassLoaderTypes(soa, image_class_loader, class_loader, error_msg)) {
+    return false;
+  }
+
+  if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
+    // No need to check further.
+    return true;
+  }
+
+  if (check_dex_file_names) {
+    std::list<ObjPtr<mirror::String>> image_dex_files;
+    if (!GetDexFileNames(soa, image_class_loader, &image_dex_files, error_msg)) {
       return false;
     }
-    class_loader = class_loader->GetParent();
+
+    std::list<ObjPtr<mirror::String>> loader_dex_files;
+    if (!GetDexFileNames(soa, class_loader, &loader_dex_files, error_msg)) {
+      return false;
+    }
+
+    if (!CompareDexFiles(image_dex_files, loader_dex_files, error_msg)) {
+      return false;
+    }
+  }
+
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> shared_libraries_image_loader = field->GetObject(image_class_loader.Ptr());
+  ObjPtr<mirror::Object> shared_libraries_loader = field->GetObject(class_loader.Ptr());
+  if (shared_libraries_image_loader == nullptr) {
+    if (shared_libraries_loader != nullptr) {
+      *error_msg = "Mismatch in shared libraries";
+      return false;
+    }
+  } else if (shared_libraries_loader == nullptr) {
+    *error_msg = "Mismatch in shared libraries";
+    return false;
+  } else {
+    ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array1 =
+        shared_libraries_image_loader->AsObjectArray<mirror::ClassLoader>();
+    ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array2 =
+        shared_libraries_loader->AsObjectArray<mirror::ClassLoader>();
+    if (array1->GetLength() != array2->GetLength()) {
+      *error_msg = "Mismatch in number of shared libraries";
+      return false;
+    }
+
+    for (int32_t i = 0; i < array1->GetLength(); ++i) {
+      // Do a full comparison of the class loaders, including comparing their dex files.
+      if (!CompareClassLoaders(soa,
+                               array1->Get(i),
+                               array2->Get(i),
+                               /*check_dex_file_names=*/ true,
+                               error_msg)) {
+        return false;
+      }
+    }
+  }
+
+  // Do a full comparison of the class loaders, including comparing their dex files.
+  if (!CompareClassLoaders(soa,
+                           image_class_loader->GetParent(),
+                           class_loader->GetParent(),
+                           /*check_dex_file_names=*/ true,
+                           error_msg)) {
+    return false;
   }
   return true;
 }
@@ -1157,7 +1313,7 @@
   VerifyDeclaringClassVisitor() REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
       : live_bitmap_(Runtime::Current()->GetHeap()->GetLiveBitmap()) {}
 
-  virtual void Visit(ArtMethod* method)
+  void Visit(ArtMethod* method) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked();
     if (klass != nullptr) {
@@ -1169,18 +1325,36 @@
   gc::accounting::HeapBitmap* const live_bitmap_;
 };
 
-class FixupInternVisitor {
+/*
+ * A visitor class used to verify that all strings in an AppImage have been
+ * properly interned; it is only ever run in debug mode.
+ */
+class VerifyStringInterningVisitor {
  public:
-  ALWAYS_INLINE ObjPtr<mirror::Object> TryInsertIntern(mirror::Object* obj) const
+  explicit VerifyStringInterningVisitor(const gc::space::ImageSpace& space) :
+      space_(space),
+      intern_table_(*Runtime::Current()->GetInternTable()) {}
+
+  void TestObject(ObjPtr<mirror::Object> referred_obj) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (obj != nullptr && obj->IsString()) {
-      const auto intern = Runtime::Current()->GetInternTable()->InternStrong(obj->AsString());
-      return intern;
+    if (referred_obj != nullptr &&
+        space_.HasAddress(referred_obj.Ptr()) &&
+        referred_obj->IsString()) {
+      ObjPtr<mirror::String> referred_str = referred_obj->AsString();
+
+      if (kIsDebugBuild) {
+        // Saved to temporary variables to aid in debugging.
+        ObjPtr<mirror::String> strong_lookup_result =
+            intern_table_.LookupStrong(Thread::Current(), referred_str);
+        ObjPtr<mirror::String> weak_lookup_result =
+            intern_table_.LookupWeak(Thread::Current(), referred_str);
+
+        DCHECK((strong_lookup_result == referred_str) || (weak_lookup_result == referred_str));
+      }
     }
-    return obj;
   }
 
-  ALWAYS_INLINE void VisitRootIfNonNull(
+  void VisitRootIfNonNull(
       mirror::CompressedReference<mirror::Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!root->IsNull()) {
@@ -1188,48 +1362,77 @@
     }
   }
 
-  ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    root->Assign(TryInsertIntern(root->AsMirrorPtr()));
+    TestObject(root->AsMirrorPtr());
   }
 
   // Visit Class Fields
-  ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
-                                MemberOffset offset,
-                                bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<mirror::Object> obj,
+                  MemberOffset offset,
+                  bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // There could be overlap between ranges, we must avoid visiting the same reference twice.
     // Avoid the class field since we already fixed it up in FixupClassVisitor.
     if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
       // Updating images, don't do a read barrier.
-      // Only string fields are fixed, don't do a verify.
-      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
-          offset);
-      obj->SetFieldObject<false, false>(offset, TryInsertIntern(ref));
+      ObjPtr<mirror::Object> referred_obj =
+          obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+
+      TestObject(referred_obj);
     }
   }
 
   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                   ObjPtr<mirror::Reference> ref) const
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
-    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+    operator()(ref, mirror::Reference::ReferentOffset(), false);
   }
 
-  void operator()(mirror::Object* obj) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (obj->IsDexCache()) {
-      obj->VisitReferences<true, kVerifyNone, kWithoutReadBarrier>(*this, *this);
-    } else {
-      // Don't visit native roots for non-dex-cache
-      obj->VisitReferences<false, kVerifyNone, kWithoutReadBarrier>(*this, *this);
-    }
-  }
+  const gc::space::ImageSpace& space_;
+  InternTable& intern_table_;
 };
 
+/*
+ * This function verifies that string references in the AppImage have been
+ * properly interned.  To be considered properly interned, a reference must
+ * point to the same instance of the string that the intern table holds.
+ */
+void VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::mutator_lock_) {
+  const gc::accounting::ContinuousSpaceBitmap* bitmap = space.GetMarkBitmap();
+  const ImageHeader& image_header = space.GetImageHeader();
+  const uint8_t* target_base = space.GetMemMap()->Begin();
+  const ImageSection& objects_section = image_header.GetObjectsSection();
+
+  auto objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+  auto objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+
+  VerifyStringInterningVisitor visitor(space);
+  bitmap->VisitMarkedRange(objects_begin,
+                           objects_end,
+                           [&space, &visitor](mirror::Object* obj)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (space.HasAddress(obj)) {
+      if (obj->IsDexCache()) {
+        obj->VisitReferences</* kVisitNativeRoots= */ true,
+                             kVerifyNone,
+                             kWithoutReadBarrier>(visitor, visitor);
+      } else {
+        // Don't visit native roots for non-dex-cache as they can't contain
+        // native references to strings.  This is verified during compilation
+        // by ImageWriter::VerifyNativeGCRootInvariants.
+        obj->VisitReferences</* kVisitNativeRoots= */ false,
+                             kVerifyNone,
+                             kWithoutReadBarrier>(visitor, visitor);
+      }
+    }
+  });
+}
+
 // new_class_set is the set of classes that were read from the class table section in the image.
 // If there was no class table section, it is null.
 // Note: using a class here to avoid having to make ClassLinker internals public.
-class AppImageClassLoadersAndDexCachesHelper {
+class AppImageLoadingHelper {
  public:
   static void Update(
       ClassLinker* class_linker,
@@ -1239,9 +1442,17 @@
       ClassTable::ClassSet* new_class_set)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static void HandleAppImageStrings(gc::space::ImageSpace* space)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static void UpdateInternStrings(
+      gc::space::ImageSpace* space,
+      const SafeMap<mirror::String*, mirror::String*>& intern_remap)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
-void AppImageClassLoadersAndDexCachesHelper::Update(
+void AppImageLoadingHelper::Update(
     ClassLinker* class_linker,
     gc::space::ImageSpace* space,
     Handle<mirror::ClassLoader> class_loader,
@@ -1249,6 +1460,8 @@
     ClassTable::ClassSet* new_class_set)
     REQUIRES(!Locks::dex_lock_)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedTrace app_image_timing("AppImage:Updating");
+
   Thread* const self = Thread::Current();
   gc::Heap* const heap = Runtime::Current()->GetHeap();
   const ImageHeader& header = space->GetImageHeader();
@@ -1264,6 +1477,7 @@
         CHECK(!class_linker->FindDexCacheDataLocked(*dex_file).IsValid());
         class_linker->RegisterDexFileLocked(*dex_file, dex_cache, class_loader.Get());
       }
+
       if (kIsDebugBuild) {
         CHECK(new_class_set != nullptr);
         mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
@@ -1271,17 +1485,20 @@
         for (size_t j = 0; j != num_types; ++j) {
           // The image space is not yet added to the heap, avoid read barriers.
           ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
+
           if (space->HasAddress(klass.Ptr())) {
             DCHECK(!klass->IsErroneous()) << klass->GetStatus();
             auto it = new_class_set->find(ClassTable::TableSlot(klass));
             DCHECK(it != new_class_set->end());
             DCHECK_EQ(it->Read(), klass);
             ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+
             if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
               auto it2 = new_class_set->find(ClassTable::TableSlot(super_class));
               DCHECK(it2 != new_class_set->end());
               DCHECK_EQ(it2->Read(), super_class);
             }
+
             for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
               const void* code = m.GetEntryPointFromQuickCompiledCode();
               const void* oat_code = m.IsInvokable() ? class_linker->GetQuickOatCodeFor(&m) : code;
@@ -1292,6 +1509,7 @@
                 DCHECK_EQ(code, oat_code) << m.PrettyMethod();
               }
             }
+
             for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
               const void* code = m.GetEntryPointFromQuickCompiledCode();
               const void* oat_code = m.IsInvokable() ? class_linker->GetQuickOatCodeFor(&m) : code;
@@ -1307,47 +1525,171 @@
       }
     }
   }
+
   if (ClassLinker::kAppImageMayContainStrings) {
-    // Fixup all the literal strings happens at app images which are supposed to be interned.
-    ScopedTrace timing("Fixup String Intern in image and dex_cache");
-    const auto& image_header = space->GetImageHeader();
-    const auto bitmap = space->GetMarkBitmap();  // bitmap of objects
-    const uint8_t* target_base = space->GetMemMap()->Begin();
-    const ImageSection& objects_section = image_header.GetObjectsSection();
+    HandleAppImageStrings(space);
 
-    uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
-    uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-
-    FixupInternVisitor fixup_intern_visitor;
-    bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_intern_visitor);
+    if (kIsDebugBuild) {
+      VerifyStringInterning(*space);
+    }
   }
+
   if (kVerifyArtMethodDeclaringClasses) {
-    ScopedTrace timing("Verify declaring classes");
+    ScopedTrace timing("AppImage:VerifyDeclaringClasses");
     ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
     VerifyDeclaringClassVisitor visitor;
     header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
   }
 }
 
-// Update the class loader. Should only be used on classes in the image space.
-class UpdateClassLoaderVisitor {
- public:
-  UpdateClassLoaderVisitor(gc::space::ImageSpace* space, ObjPtr<mirror::ClassLoader> class_loader)
-      : space_(space),
-        class_loader_(class_loader) {}
+void AppImageLoadingHelper::UpdateInternStrings(
+    gc::space::ImageSpace* space,
+    const SafeMap<mirror::String*, mirror::String*>& intern_remap) {
+  const uint8_t* target_base = space->Begin();
+  const ImageSection& sro_section =
+      space->GetImageHeader().GetImageStringReferenceOffsetsSection();
+  const size_t num_string_offsets = sro_section.Size() / sizeof(AppImageReferenceOffsetInfo);
 
-  bool operator()(ObjPtr<mirror::Class> klass) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Do not update class loader for boot image classes where the app image
-    // class loader is only the initiating loader but not the defining loader.
-    if (klass->GetClassLoader() != nullptr) {
-      klass->SetClassLoader(class_loader_);
+  VLOG(image)
+      << "ClassLinker:AppImage:InternStrings:imageStringReferenceOffsetCount = "
+      << num_string_offsets;
+
+  const auto* sro_base =
+      reinterpret_cast<const AppImageReferenceOffsetInfo*>(target_base + sro_section.Offset());
+
+  for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
+    uint32_t base_offset = sro_base[offset_index].first;
+
+    if (HasDexCacheStringNativeRefTag(base_offset)) {
+      base_offset = ClearDexCacheNativeRefTags(base_offset);
+      DCHECK_ALIGNED(base_offset, 2);
+
+      ObjPtr<mirror::DexCache> dex_cache =
+          reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+      uint32_t string_index = sro_base[offset_index].second;
+
+      mirror::StringDexCachePair source = dex_cache->GetStrings()[string_index].load();
+      ObjPtr<mirror::String> referred_string = source.object.Read();
+      DCHECK(referred_string != nullptr);
+
+      auto it = intern_remap.find(referred_string.Ptr());
+      if (it != intern_remap.end()) {
+        // This doesn't use SetResolvedString to maintain consistency with how
+        // we load the string.  The index from the source string must be
+        // re-used due to the circular nature of the cache.  Because we are not
+        // using a helper function we need to mark the GC card manually.
+        WriteBarrier::ForEveryFieldWrite(dex_cache);
+        dex_cache->GetStrings()[string_index].store(
+            mirror::StringDexCachePair(it->second, source.index));
+      }
+    } else if (HasDexCachePreResolvedStringNativeRefTag(base_offset)) {
+      base_offset = ClearDexCacheNativeRefTags(base_offset);
+      DCHECK_ALIGNED(base_offset, 2);
+
+      ObjPtr<mirror::DexCache> dex_cache =
+          reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+      uint32_t string_index = sro_base[offset_index].second;
+
+      ObjPtr<mirror::String> referred_string =
+          dex_cache->GetPreResolvedStrings()[string_index].Read();
+      DCHECK(referred_string != nullptr);
+
+      auto it = intern_remap.find(referred_string.Ptr());
+      if (it != intern_remap.end()) {
+        // Because we are not using a helper function we need to mark the GC card manually.
+        WriteBarrier::ForEveryFieldWrite(dex_cache);
+        dex_cache->GetPreResolvedStrings()[string_index] = GcRoot<mirror::String>(it->second);
+      }
+    } else {
+      uint32_t raw_member_offset = sro_base[offset_index].second;
+      DCHECK_ALIGNED(base_offset, 2);
+      DCHECK_ALIGNED(raw_member_offset, 2);
+
+      ObjPtr<mirror::Object> obj_ptr =
+          reinterpret_cast<mirror::Object*>(space->Begin() + base_offset);
+      MemberOffset member_offset(raw_member_offset);
+      ObjPtr<mirror::String> referred_string =
+          obj_ptr->GetFieldObject<mirror::String,
+                                  kVerifyNone,
+                                  kWithoutReadBarrier,
+                                  /* kIsVolatile= */ false>(member_offset);
+      DCHECK(referred_string != nullptr);
+
+      auto it = intern_remap.find(referred_string.Ptr());
+      if (it != intern_remap.end()) {
+        obj_ptr->SetFieldObject</* kTransactionActive= */ false,
+                                /* kCheckTransaction= */ false,
+                                kVerifyNone,
+                                /* kIsVolatile= */ false>(member_offset, it->second);
+      }
     }
-    return true;
   }
+}
 
-  gc::space::ImageSpace* const space_;
-  ObjPtr<mirror::ClassLoader> const class_loader_;
-};
+void AppImageLoadingHelper::HandleAppImageStrings(gc::space::ImageSpace* space) {
+  // Iterate over the string reference offsets stored in the image and intern
+  // the strings they point to.
+  ScopedTrace timing("AppImage:InternString");
+
+  InternTable* const intern_table = Runtime::Current()->GetInternTable();
+
+  // Add the image strings to the intern table, removing any conflicts. For
+  // conflicts, store the new address in a map for faster lookup.
+  // TODO: Optimize with a bitmap or bloom filter
+  SafeMap<mirror::String*, mirror::String*> intern_remap;
+  intern_table->AddImageStringsToTable(space, [&](InternTable::UnorderedSet& interns)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_) {
+    const size_t non_boot_image_strings = intern_table->CountInterns(
+        /*visit_boot_images=*/false,
+        /*visit_non_boot_images=*/true);
+    VLOG(image) << "AppImage:stringsInInternTableSize = " << interns.size();
+    VLOG(image) << "AppImage:nonBootImageInternStrings = " << non_boot_image_strings;
+    // Visit the smaller of the two sets to compute the intersection.
+    if (interns.size() < non_boot_image_strings) {
+      for (auto it = interns.begin(); it != interns.end(); ) {
+        ObjPtr<mirror::String> string = it->Read();
+        ObjPtr<mirror::String> existing = intern_table->LookupWeakLocked(string);
+        if (existing == nullptr) {
+          existing = intern_table->LookupStrongLocked(string);
+        }
+        if (existing != nullptr) {
+          intern_remap.Put(string.Ptr(), existing.Ptr());
+          it = interns.erase(it);
+        } else {
+          ++it;
+        }
+      }
+    } else {
+      intern_table->VisitInterns([&](const GcRoot<mirror::String>& root)
+          REQUIRES_SHARED(Locks::mutator_lock_)
+          REQUIRES(Locks::intern_table_lock_) {
+        auto it = interns.find(root);
+        if (it != interns.end()) {
+          ObjPtr<mirror::String> existing = root.Read();
+          intern_remap.Put(it->Read(), existing.Ptr());
+          it = interns.erase(it);
+        }
+      }, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true);
+    }
+    // Sanity check to ensure correctness.
+    if (kIsDebugBuild) {
+      for (GcRoot<mirror::String>& root : interns) {
+        ObjPtr<mirror::String> string = root.Read();
+        CHECK(intern_table->LookupWeakLocked(string) == nullptr) << string->ToModifiedUtf8();
+        CHECK(intern_table->LookupStrongLocked(string) == nullptr) << string->ToModifiedUtf8();
+      }
+    }
+  });
+
+  VLOG(image) << "AppImage:conflictingInternStrings = " << intern_remap.size();
+
+  // For debug builds, always run the code below to get coverage.
+  if (kIsDebugBuild || !intern_remap.empty()) {
+    // Slow path case is when there are conflicting intern strings to fix up.
+    UpdateInternStrings(space, intern_remap);
+  }
+}
 
 static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file,
                                                      const char* location,
@@ -1557,7 +1899,7 @@
      public:
       explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
 
-      virtual void Visit(ArtMethod* method)
+      void Visit(ArtMethod* method) override
           REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
         ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
         if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
@@ -1671,13 +2013,7 @@
     std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     // TODO: Only store qualified paths.
     // If non qualified, qualify it.
-    if (dex_file_location.find('/') == std::string::npos) {
-      std::string dex_location_path = dex_location;
-      const size_t pos = dex_location_path.find_last_of('/');
-      CHECK_NE(pos, std::string::npos);
-      dex_location_path = dex_location_path.substr(0, pos + 1);  // Keep trailing '/'
-      dex_file_location = dex_location_path + dex_file_location;
-    }
+    dex_file_location = OatFile::ResolveRelativeEncodedDexLocation(dex_location, dex_file_location);
     std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
                                                              dex_file_location.c_str(),
                                                              error_msg);
@@ -1711,11 +2047,12 @@
 
   if (app_image) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
+    ScopedAssertNoThreadSuspension sants("Checking app image", soa.Self());
     // Check that the class loader resolves the same way as the ones in the image.
     // Image class loader [A][B][C][image dex files]
     // Class loader = [???][dex_elements][image dex files]
     // Need to ensure that [???][dex_elements] == [A][B][C].
-    // For each class loader, PathClassLoader, the laoder checks the parent first. Also the logic
+    // For each class loader, PathClassLoader, the loader checks the parent first. Also the logic
     // for PathClassLoader does this by looping through the array of dex files. To ensure they
     // resolve the same way, simply flatten the hierarchy in the way the resolution order would be,
     // and check that the dex file names are the same.
@@ -1723,21 +2060,12 @@
       *error_msg = "Unexpected BootClassLoader in app image";
       return false;
     }
-    std::list<ObjPtr<mirror::String>> image_dex_file_names;
-    std::string temp_error_msg;
-    if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
-      *error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
-                                temp_error_msg.c_str());
-      return false;
-    }
-    std::list<ObjPtr<mirror::String>> loader_dex_file_names;
-    if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
-      *error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
-                                temp_error_msg.c_str());
-      return false;
-    }
-    // Add the temporary dex path list elements at the end.
+    // The dex files of `class_loader` are not setup yet, so we cannot do a full comparison
+    // of `class_loader` and `image_class_loader` in `CompareClassLoaders`. Therefore, we
+    // special case the comparison of dex files of the two class loaders, but then do full
+    // comparisons for their shared libraries and parent.
     auto elements = soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements);
+    std::list<ObjPtr<mirror::String>> loader_dex_file_names;
     for (size_t i = 0, num_elems = elements->GetLength(); i < num_elems; ++i) {
       ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
       if (element != nullptr) {
@@ -1748,31 +2076,29 @@
         }
       }
     }
-    // Ignore the number of image dex files since we are adding those to the class loader anyways.
-    CHECK_GE(static_cast<size_t>(image_dex_file_names.size()),
-             static_cast<size_t>(dex_caches->GetLength()));
-    size_t image_count = image_dex_file_names.size() - dex_caches->GetLength();
-    // Check that the dex file names match.
-    bool equal = image_count == loader_dex_file_names.size();
-    if (equal) {
-      auto it1 = image_dex_file_names.begin();
-      auto it2 = loader_dex_file_names.begin();
-      for (size_t i = 0; equal && i < image_count; ++i, ++it1, ++it2) {
-        equal = equal && (*it1)->Equals(*it2);
-      }
+    std::string temp_error_msg;
+    std::list<ObjPtr<mirror::String>> image_dex_file_names;
+    bool success = GetDexFileNames(
+        soa, image_class_loader.Get(), &image_dex_file_names, &temp_error_msg);
+    if (success) {
+      // Ignore the number of image dex files since we are adding those to the class loader anyways.
+      CHECK_GE(static_cast<size_t>(image_dex_file_names.size()),
+               static_cast<size_t>(dex_caches->GetLength()));
+      size_t image_count = image_dex_file_names.size() - dex_caches->GetLength();
+      image_dex_file_names.resize(image_count);
+      success = success && CompareDexFiles(image_dex_file_names,
+                                           loader_dex_file_names,
+                                           &temp_error_msg);
+      success = success && CompareClassLoaders(soa,
+                                               image_class_loader.Get(),
+                                               class_loader.Get(),
+                                               /*check_dex_file_names=*/ false,
+                                               &temp_error_msg);
     }
-    if (!equal) {
-      VLOG(image) << "Image dex files " << image_dex_file_names.size();
-      for (ObjPtr<mirror::String> name : image_dex_file_names) {
-        VLOG(image) << name->ToModifiedUtf8();
-      }
-      VLOG(image) << "Loader dex files " << loader_dex_file_names.size();
-      for (ObjPtr<mirror::String> name : loader_dex_file_names) {
-        VLOG(image) << name->ToModifiedUtf8();
-      }
-      *error_msg = "Rejecting application image due to class loader mismatch";
-      // Ignore class loader mismatch for now since these would just use possibly incorrect
-      // oat code anyways. The structural class check should be done in the parent.
+    if (!success) {
+      *error_msg = StringPrintf("Rejecting application image due to class loader mismatch: '%s'",
+                               temp_error_msg.c_str());
+      return false;
     }
   }
 
@@ -1816,16 +2142,24 @@
     VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
   }
   if (app_image) {
-    AppImageClassLoadersAndDexCachesHelper::Update(this,
-                                                   space,
-                                                   class_loader,
-                                                   dex_caches,
-                                                   &temp_set);
-    // Update class loader and resolved strings. If added_class_table is false, the resolved
-    // strings were forwarded UpdateAppImageClassLoadersAndDexCaches.
-    UpdateClassLoaderVisitor visitor(space, class_loader.Get());
-    for (const ClassTable::TableSlot& root : temp_set) {
-      visitor(root.Read());
+    AppImageLoadingHelper::Update(this, space, class_loader, dex_caches, &temp_set);
+
+    {
+      ScopedTrace trace("AppImage:UpdateClassLoaders");
+      // Update class loader and resolved strings. If added_class_table is false, the resolved
+      // strings were forwarded by UpdateAppImageClassLoadersAndDexCaches.
+      ObjPtr<mirror::ClassLoader> loader(class_loader.Get());
+      for (const ClassTable::TableSlot& root : temp_set) {
+        // Note: We probably don't need the read barrier unless we copy the app image objects into
+        // the region space.
+        ObjPtr<mirror::Class> klass(root.Read());
+        // Do not update class loader for boot image classes where the app image
+        // class loader is only the initiating loader but not the defining loader.
+        // Avoid read barrier since we are comparing against null.
+        if (klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+          klass->SetClassLoader</*kCheckTransaction=*/ false>(loader);
+        }
+      }
     }
 
     if (kBitstringSubtypeCheckEnabled) {
@@ -1840,7 +2174,7 @@
       // Force every app image class's SubtypeCheck to be at least kIninitialized.
       //
       // See also ImageWriter::FixupClass.
-      ScopedTrace trace("Recalculate app image SubtypeCheck bitstrings");
+      ScopedTrace trace("AppImage:RecalculateSubtypeCheckBitstrings");
       MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
       for (const ClassTable::TableSlot& root : temp_set) {
         SubtypeCheck<ObjPtr<mirror::Class>>::EnsureInitialized(root.Read());
@@ -1860,7 +2194,7 @@
   if (kIsDebugBuild && app_image) {
     // This verification needs to happen after the classes have been added to the class loader.
     // Since it ensures classes are in the class table.
-    ScopedTrace trace("VerifyAppImage");
+    ScopedTrace trace("AppImage:Verify");
     VerifyAppImage(header, class_loader, dex_caches, class_table, space);
   }
 
@@ -2107,7 +2441,7 @@
   for (const ClassLoaderData& data : class_loaders_) {
     // CHA unloading analysis is not needed. No negative consequences are expected because
     // all the classloaders are deleted at the same time.
-    DeleteClassLoader(self, data, false /*cleanup_cha*/);
+    DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
   }
   class_loaders_.clear();
 }
@@ -2211,7 +2545,7 @@
   // in the `klass_` field of one of its instances allocated in the Large-Object
   // Space (LOS) -- see the comment about the dirty card scanning logic in
   // art::gc::collector::ConcurrentCopying::MarkingPhase.
-  return AllocClass</* kMovable */ false>(
+  return AllocClass</* kMovable= */ false>(
       self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
 }
 
@@ -2299,13 +2633,13 @@
   return klass;
 }
 
-typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry;
+using ClassPathEntry = std::pair<const DexFile*, const dex::ClassDef*>;
 
 // Search a collection of DexFiles for a descriptor
 ClassPathEntry FindInClassPath(const char* descriptor,
                                size_t hash, const std::vector<const DexFile*>& class_path) {
   for (const DexFile* dex_file : class_path) {
-    const DexFile::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash);
+    const dex::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash);
     if (dex_class_def != nullptr) {
       return ClassPathEntry(dex_file, dex_class_def);
     }
@@ -2313,6 +2647,35 @@
   return ClassPathEntry(nullptr, nullptr);
 }
 
+bool ClassLinker::FindClassInSharedLibraries(ScopedObjectAccessAlreadyRunnable& soa,
+                                             Thread* self,
+                                             const char* descriptor,
+                                             size_t hash,
+                                             Handle<mirror::ClassLoader> class_loader,
+                                             /*out*/ ObjPtr<mirror::Class>* result) {
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader.Get());
+  if (raw_shared_libraries == nullptr) {
+    return true;
+  }
+
+  StackHandleScope<2> hs(self);
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  MutableHandle<mirror::ClassLoader> temp_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
+  for (int32_t i = 0; i < shared_libraries->GetLength(); ++i) {
+    temp_loader.Assign(shared_libraries->Get(i));
+    if (!FindClassInBaseDexClassLoader(soa, self, descriptor, hash, temp_loader, result)) {
+      return false;  // One of the shared libraries is not supported.
+    }
+    if (*result != nullptr) {
+      return true;  // Found the class up the chain.
+    }
+  }
+  return true;
+}
+
 bool ClassLinker::FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                                 Thread* self,
                                                 const char* descriptor,
@@ -2325,9 +2688,10 @@
     return true;
   }
 
-  if (IsPathOrDexClassLoader(soa, class_loader)) {
+  if (IsPathOrDexClassLoader(soa, class_loader) || IsInMemoryDexClassLoader(soa, class_loader)) {
     // For regular path or dex class loader the search order is:
     //    - parent
+    //    - shared libraries
     //    - class loader dex files
 
     // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
@@ -2340,6 +2704,13 @@
       return true;  // Found the class up the chain.
     }
 
+    if (!FindClassInSharedLibraries(soa, self, descriptor, hash, class_loader, result)) {
+      return false;  // One of the shared library loaders is not supported.
+    }
+    if (*result != nullptr) {
+      return true;  // Found the class in a shared library.
+    }
+
     // Search the current class loader classpath.
     *result = FindClassInBaseDexClassLoaderClassPath(soa, descriptor, hash, class_loader);
     return true;
@@ -2348,6 +2719,7 @@
   if (IsDelegateLastClassLoader(soa, class_loader)) {
     // For delegate last, the search order is:
     //    - boot class path
+    //    - shared libraries
     //    - class loader dex files
     //    - parent
     *result = FindClassInBootClassLoaderClassPath(self, descriptor, hash);
@@ -2355,6 +2727,13 @@
       return true;  // The class is part of the boot class path.
     }
 
+    if (!FindClassInSharedLibraries(soa, self, descriptor, hash, class_loader, result)) {
+      return false;  // One of the shared library loaders is not supported.
+    }
+    if (*result != nullptr) {
+      return true;  // Found the class in a shared library.
+    }
+
     *result = FindClassInBaseDexClassLoaderClassPath(soa, descriptor, hash, class_loader);
     if (*result != nullptr) {
       return true;  // Found the class in the current class loader
@@ -2403,13 +2782,14 @@
     const char* descriptor,
     size_t hash,
     Handle<mirror::ClassLoader> class_loader) {
-  DCHECK(IsPathOrDexClassLoader(soa, class_loader) || IsDelegateLastClassLoader(soa, class_loader))
+  DCHECK(IsPathOrDexClassLoader(soa, class_loader) ||
+         IsInMemoryDexClassLoader(soa, class_loader) ||
+         IsDelegateLastClassLoader(soa, class_loader))
       << "Unexpected class loader for descriptor " << descriptor;
 
   ObjPtr<mirror::Class> ret;
   auto define_class = [&](const DexFile* cp_dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
-    const DexFile::ClassDef* dex_class_def =
-        OatDexFile::FindClassDef(*cp_dex_file, descriptor, hash);
+    const dex::ClassDef* dex_class_def = OatDexFile::FindClassDef(*cp_dex_file, descriptor, hash);
     if (dex_class_def != nullptr) {
       ObjPtr<mirror::Class> klass = DefineClass(soa.Self(),
                                                 descriptor,
@@ -2497,7 +2877,8 @@
       // the Java-side could still succeed for racy programs if another thread is actively
       // modifying the class loader's path list.
 
-      if (!self->CanCallIntoJava()) {
+      // The runtime is not allowed to call into java from a runtime-thread so just abort.
+      if (self->IsRuntimeThread()) {
         // Oops, we can't call into java so we can't run actual class-loader code.
         // This is true for e.g. for the compiler (jit or aot).
         ObjPtr<mirror::Throwable> pre_allocated =
@@ -2608,7 +2989,7 @@
                                                size_t hash,
                                                Handle<mirror::ClassLoader> class_loader,
                                                const DexFile& dex_file,
-                                               const DexFile::ClassDef& dex_class_def) {
+                                               const dex::ClassDef& dex_class_def) {
   StackHandleScope<3> hs(self);
   auto klass = hs.NewHandle<mirror::Class>(nullptr);
 
@@ -2630,6 +3011,17 @@
     }
   }
 
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with (eg) jit threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   if (klass == nullptr) {
     // Allocate a class with the status of not ready.
     // Interface object should get the right size here. Regular class will
@@ -2644,7 +3036,7 @@
   // Get the real dex file. This will return the input if there aren't any callbacks or they do
   // nothing.
   DexFile const* new_dex_file = nullptr;
-  DexFile::ClassDef const* new_class_def = nullptr;
+  dex::ClassDef const* new_class_def = nullptr;
   // TODO We should ideally figure out some way to move this after we get a lock on the klass so it
   // will only be called once.
   Runtime::Current()->GetRuntimeCallbacks()->ClassPreDefine(descriptor,
@@ -2765,7 +3157,7 @@
 }
 
 uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
-                                                       const DexFile::ClassDef& dex_class_def) {
+                                                       const dex::ClassDef& dex_class_def) {
   size_t num_ref = 0;
   size_t num_8 = 0;
   size_t num_16 = 0;
@@ -2783,7 +3175,7 @@
       continue;
     }
     last_field_idx = field_idx;
-    const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+    const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
     const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id);
     char c = descriptor[0];
     switch (c) {
@@ -3012,7 +3404,7 @@
 }
 
 void ClassLinker::SetupClass(const DexFile& dex_file,
-                             const DexFile::ClassDef& dex_class_def,
+                             const dex::ClassDef& dex_class_def,
                              Handle<mirror::Class> klass,
                              ObjPtr<mirror::ClassLoader> class_loader) {
   CHECK(klass != nullptr);
@@ -3093,9 +3485,11 @@
 
 void ClassLinker::LoadClass(Thread* self,
                             const DexFile& dex_file,
-                            const DexFile::ClassDef& dex_class_def,
+                            const dex::ClassDef& dex_class_def,
                             Handle<mirror::Class> klass) {
-  ClassAccessor accessor(dex_file, dex_class_def);
+  ClassAccessor accessor(dex_file,
+                         dex_class_def,
+                         /* parse_hiddenapi_class_data= */ klass->IsBootStrapClassLoaded());
   if (!accessor.HasClassData()) {
     return;
   }
@@ -3207,14 +3601,8 @@
   dst->SetDexFieldIndex(field_idx);
   dst->SetDeclaringClass(klass.Get());
 
-  // Get access flags from the DexFile. If this is a boot class path class,
-  // also set its runtime hidden API access flags.
-  uint32_t access_flags = field.GetAccessFlags();
-  if (klass->IsBootStrapClassLoaded()) {
-    access_flags =
-        HiddenApiAccessFlags::EncodeForRuntime(access_flags, field.DecodeHiddenAccessFlags());
-  }
-  dst->SetAccessFlags(access_flags);
+  // Get access flags from the DexFile and set hiddenapi runtime access flags.
+  dst->SetAccessFlags(field.GetAccessFlags() | hiddenapi::CreateRuntimeFlags(field));
 }
 
 void ClassLinker::LoadMethod(const DexFile& dex_file,
@@ -3222,7 +3610,7 @@
                              Handle<mirror::Class> klass,
                              ArtMethod* dst) {
   const uint32_t dex_method_idx = method.GetIndex();
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+  const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
 
   ScopedAssertNoThreadSuspension ants("LoadMethod");
@@ -3230,14 +3618,8 @@
   dst->SetDeclaringClass(klass.Get());
   dst->SetCodeItemOffset(method.GetCodeItemOffset());
 
-  // Get access flags from the DexFile. If this is a boot class path class,
-  // also set its runtime hidden API access flags.
-  uint32_t access_flags = method.GetAccessFlags();
-
-  if (klass->IsBootStrapClassLoaded()) {
-    access_flags =
-        HiddenApiAccessFlags::EncodeForRuntime(access_flags, method.DecodeHiddenAccessFlags());
-  }
+  // Get access flags from the DexFile and set hiddenapi runtime access flags.
+  uint32_t access_flags = method.GetAccessFlags() | hiddenapi::CreateRuntimeFlags(method);
 
   if (UNLIKELY(strcmp("finalize", method_name) == 0)) {
     // Set finalizable flag on declaring class.
@@ -3279,6 +3661,10 @@
         dex_file, dst->GetClassDef(), dex_method_idx);
   }
   dst->SetAccessFlags(access_flags);
+  // Must be done after SetAccessFlags since IsAbstract depends on it.
+  if (klass->IsInterface() && dst->IsAbstract()) {
+    dst->CalculateAndSetImtIndex();
+  }
 }
 
 void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
@@ -3295,7 +3681,7 @@
   CHECK(dex_cache != nullptr) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
   WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
-  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
+  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr);
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -3374,8 +3760,7 @@
   data.weak_root = dex_cache_jweak;
   data.dex_file = dex_cache->GetDexFile();
   data.class_table = ClassTableForClassLoader(class_loader);
-  AddNativeDebugInfoForDex(self, ArrayRef<const uint8_t>(data.dex_file->Begin(),
-                                                         data.dex_file->Size()));
+  AddNativeDebugInfoForDex(self, data.dex_file);
   DCHECK(data.class_table != nullptr);
   // Make sure to hold the dex cache live in the class table. This case happens for the boot class
   // path dex caches without an image.
@@ -3585,6 +3970,7 @@
   h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
   h_class->SetPrimitiveType(type);
   h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
+  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_);
   mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self);
   const char* descriptor = Primitive::Descriptor(type);
   ObjPtr<mirror::Class> existing = InsertClass(descriptor,
@@ -3618,6 +4004,18 @@
   // Identify the underlying component type
   CHECK_EQ('[', descriptor[0]);
   StackHandleScope<2> hs(self);
+
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with (eg) jit threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   MutableHandle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1,
                                                                      class_loader)));
   if (component_type == nullptr) {
@@ -3720,6 +4118,7 @@
   new_class->PopulateEmbeddedVTable(image_pointer_size_);
   ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
   new_class->SetImt(object_imt, image_pointer_size_);
+  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_);
   mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self);
   // don't need to set new_class->SetObjectSize(..)
   // because Object::SizeOf delegates to Array::SizeOf
@@ -3750,6 +4149,8 @@
   // and remove "interface".
   access_flags |= kAccAbstract | kAccFinal;
   access_flags &= ~kAccInterface;
+  // Arrays are access-checks-clean and preverified.
+  access_flags |= kAccVerificationAttempted;
 
   new_class->SetAccessFlags(access_flags);
 
@@ -3773,38 +4174,37 @@
   return existing;
 }
 
-ObjPtr<mirror::Class> ClassLinker::FindPrimitiveClass(char type) {
-  ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassRoots();
+ObjPtr<mirror::Class> ClassLinker::LookupPrimitiveClass(char type) {
+  ClassRoot class_root;
   switch (type) {
-    case 'B':
-      return GetClassRoot(ClassRoot::kPrimitiveByte, class_roots);
-    case 'C':
-      return GetClassRoot(ClassRoot::kPrimitiveChar, class_roots);
-    case 'D':
-      return GetClassRoot(ClassRoot::kPrimitiveDouble, class_roots);
-    case 'F':
-      return GetClassRoot(ClassRoot::kPrimitiveFloat, class_roots);
-    case 'I':
-      return GetClassRoot(ClassRoot::kPrimitiveInt, class_roots);
-    case 'J':
-      return GetClassRoot(ClassRoot::kPrimitiveLong, class_roots);
-    case 'S':
-      return GetClassRoot(ClassRoot::kPrimitiveShort, class_roots);
-    case 'Z':
-      return GetClassRoot(ClassRoot::kPrimitiveBoolean, class_roots);
-    case 'V':
-      return GetClassRoot(ClassRoot::kPrimitiveVoid, class_roots);
+    case 'B': class_root = ClassRoot::kPrimitiveByte; break;
+    case 'C': class_root = ClassRoot::kPrimitiveChar; break;
+    case 'D': class_root = ClassRoot::kPrimitiveDouble; break;
+    case 'F': class_root = ClassRoot::kPrimitiveFloat; break;
+    case 'I': class_root = ClassRoot::kPrimitiveInt; break;
+    case 'J': class_root = ClassRoot::kPrimitiveLong; break;
+    case 'S': class_root = ClassRoot::kPrimitiveShort; break;
+    case 'Z': class_root = ClassRoot::kPrimitiveBoolean; break;
+    case 'V': class_root = ClassRoot::kPrimitiveVoid; break;
     default:
-      break;
+      return nullptr;
   }
-  std::string printable_type(PrintableChar(type));
-  ThrowNoClassDefFoundError("Not a primitive type: %s", printable_type.c_str());
-  return nullptr;
+  return GetClassRoot(class_root, this);
+}
+
+ObjPtr<mirror::Class> ClassLinker::FindPrimitiveClass(char type) {
+  ObjPtr<mirror::Class> result = LookupPrimitiveClass(type);
+  if (UNLIKELY(result == nullptr)) {
+    std::string printable_type(PrintableChar(type));
+    ThrowNoClassDefFoundError("Not a primitive type: %s", printable_type.c_str());
+  }
+  return result;
 }
 
 ObjPtr<mirror::Class> ClassLinker::InsertClass(const char* descriptor,
                                                ObjPtr<mirror::Class> klass,
                                                size_t hash) {
+  DCHECK(Thread::Current()->CanLoadClasses());
   if (VLOG_IS_ON(class_linker)) {
     ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
     std::string source;
@@ -3985,17 +4385,6 @@
   return false;
 }
 
-// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
-// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
-// before.
-static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (!klass->WasVerificationAttempted()) {
-    klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
-    klass->SetVerificationAttempted();
-  }
-}
-
 verifier::FailureKind ClassLinker::VerifyClass(
     Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
   {
@@ -4329,6 +4718,18 @@
                                                     jobjectArray methods,
                                                     jobjectArray throws) {
   Thread* self = soa.Self();
+
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with (eg) jit-threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   StackHandleScope<10> hs(self);
   MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
       AllocClass(self, GetClassRoot<mirror::Class>(this), sizeof(mirror::Class))));
@@ -4460,6 +4861,7 @@
   {
     // Lock on klass is released. Lock new class object.
     ObjectLock<mirror::Class> initialization_lock(self, klass);
+    EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
     mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
   }
 
@@ -4589,7 +4991,7 @@
     }
     // Check if there are encoded static values needing initialization.
     if (klass->NumStaticFields() != 0) {
-      const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
+      const dex::ClassDef* dex_class_def = klass->GetClassDef();
       DCHECK(dex_class_def != nullptr);
       if (dex_class_def->static_values_off_ != 0) {
         return false;
@@ -4646,7 +5048,7 @@
 
     // Was the class already found to be erroneous? Done under the lock to match the JLS.
     if (klass->IsErroneous()) {
-      ThrowEarlierClassFailure(klass.Get(), true);
+      ThrowEarlierClassFailure(klass.Get(), true, /* log= */ true);
       VlogClassInitializationFailure(klass);
       return false;
     }
@@ -4678,7 +5080,10 @@
         } else {
           CHECK(Runtime::Current()->IsAotCompiler());
           CHECK_EQ(klass->GetStatus(), ClassStatus::kRetryVerificationAtRuntime);
+          self->AssertNoPendingException();
+          self->SetException(Runtime::Current()->GetPreAllocatedNoClassDefFoundError());
         }
+        self->AssertPendingException();
         return false;
       } else {
         self->AssertNoPendingException();
@@ -4803,7 +5208,7 @@
 
   const size_t num_static_fields = klass->NumStaticFields();
   if (num_static_fields > 0) {
-    const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
+    const dex::ClassDef* dex_class_def = klass->GetClassDef();
     CHECK(dex_class_def != nullptr);
     StackHandleScope<3> hs(self);
     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
@@ -4817,8 +5222,10 @@
       ArtField* resolved_field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
       if (resolved_field == nullptr) {
         // Populating cache of a dex file which defines `klass` should always be allowed.
-        DCHECK_EQ(hiddenapi::GetMemberAction(
-            field, class_loader.Get(), dex_cache.Get(), hiddenapi::kNone), hiddenapi::kAllow);
+        DCHECK(!hiddenapi::ShouldDenyAccessToMember(
+            field,
+            hiddenapi::AccessContext(class_loader.Get(), dex_cache.Get()),
+            hiddenapi::AccessMethod::kNone));
         dex_cache->SetResolvedField(field_idx, field, image_pointer_size_);
       } else {
         DCHECK_EQ(field, resolved_field);
@@ -4841,7 +5248,7 @@
         ArtField* art_field = ResolveField(field.GetIndex(),
                                            dex_cache,
                                            class_loader,
-                                           /* is_static */ true);
+                                           /* is_static= */ true);
         if (Runtime::Current()->IsActiveTransaction()) {
           value_it.ReadValueToField<true>(art_field);
         } else {
@@ -5010,8 +5417,8 @@
   DCHECK(Thread::Current()->IsExceptionPending());
   DCHECK(!m->IsProxyMethod());
   const DexFile* dex_file = m->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file->GetMethodId(m->GetDexMethodIndex());
-  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+  const dex::MethodId& method_id = dex_file->GetMethodId(m->GetDexMethodIndex());
+  const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
   dex::TypeIndex return_type_idx = proto_id.return_type_idx_;
   std::string return_type = dex_file->PrettyType(return_type_idx);
   std::string class_loader = mirror::Object::PrettyTypeOf(m->GetDeclaringClass()->GetClassLoader());
@@ -5089,8 +5496,8 @@
       return false;
     }
   }
-  const DexFile::TypeList* types1 = method1->GetParameterTypeList();
-  const DexFile::TypeList* types2 = method2->GetParameterTypeList();
+  const dex::TypeList* types1 = method1->GetParameterTypeList();
+  const dex::TypeList* types2 = method2->GetParameterTypeList();
   if (types1 == nullptr) {
     if (types2 != nullptr && types2->Size() != 0) {
       ThrowSignatureMismatch(klass, super_klass, method1,
@@ -5205,8 +5612,7 @@
   DCHECK(c != nullptr);
 
   if (c->IsInitialized()) {
-    EnsureSkipAccessChecksMethods(c, image_pointer_size_);
-    self->AssertNoPendingException();
+    DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader();
     return true;
   }
   // SubtypeCheckInfo::Initialized must happen-before any new-instance for that type.
@@ -5450,7 +5856,7 @@
 
 bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) {
   CHECK_EQ(ClassStatus::kIdx, klass->GetStatus());
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
+  const dex::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
   dex::TypeIndex super_class_idx = class_def.superclass_idx_;
   if (super_class_idx.IsValid()) {
     // Check that a class does not inherit from itself directly.
@@ -5481,7 +5887,7 @@
     CHECK(super_class->IsResolved());
     klass->SetSuperClass(super_class);
   }
-  const DexFile::TypeList* interfaces = dex_file.GetInterfacesList(class_def);
+  const dex::TypeList* interfaces = dex_file.GetInterfacesList(class_def);
   if (interfaces != nullptr) {
     for (size_t i = 0; i < interfaces->Size(); i++) {
       dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -5625,7 +6031,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!other->IsProxyMethod()) << other->PrettyMethod();
     const DexFile* other_dex_file = other->GetDexFile();
-    const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
+    const dex::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
     if (dex_file_ == other_dex_file) {
       return mid_->name_idx_ == other_mid.name_idx_ && mid_->proto_idx_ == other_mid.proto_idx_;
     }
@@ -5643,7 +6049,7 @@
   // Dex file for the method to compare against.
   const DexFile* const dex_file_;
   // MethodId for the method to compare against.
-  const DexFile::MethodId* const mid_;
+  const dex::MethodId* const mid_;
   // Lazily computed name from the dex file's strings.
   const char* name_;
   // Lazily computed name length.
@@ -6241,8 +6647,8 @@
                        unimplemented_method,
                        conflict_method,
                        klass,
-                       /*create_conflict_tables*/true,
-                       /*ignore_copied_methods*/false,
+                       /*create_conflict_tables=*/true,
+                       /*ignore_copied_methods=*/false,
                        &new_conflict,
                        &imt_data[0]);
   }
@@ -6340,7 +6746,7 @@
       // or interface methods in the IMT here they will not create extra conflicts since we compare
       // names and signatures in SetIMTRef.
       ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
-      const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+      const uint32_t imt_index = interface_method->GetImtIndex();
 
       // There is only any conflicts if all of the interface methods for an IMT slot don't have
       // the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6394,7 +6800,7 @@
         }
         DCHECK(implementation_method != nullptr);
         ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
-        const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+        const uint32_t imt_index = interface_method->GetImtIndex();
         if (!imt[imt_index]->IsRuntimeMethod() ||
             imt[imt_index] == unimplemented_method ||
             imt[imt_index] == imt_conflict_method) {
@@ -6446,7 +6852,7 @@
 // iftable must be large enough to hold all interfaces without changing its size.
 static size_t FillIfTable(ObjPtr<mirror::IfTable> iftable,
                           size_t super_ifcount,
-                          std::vector<ObjPtr<mirror::Class>> to_process)
+                          const std::vector<ObjPtr<mirror::Class>>& to_process)
     REQUIRES(Roles::uninterruptible_)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // This is the set of all class's already in the iftable. Used to make checking if a class has
@@ -6730,8 +7136,8 @@
                          unimplemented_method,
                          imt_conflict_method,
                          klass.Get(),
-                         /*create_conflict_table*/false,
-                         /*ignore_copied_methods*/true,
+                         /*create_conflict_tables=*/false,
+                         /*ignore_copied_methods=*/true,
                          /*out*/new_conflict,
                          /*out*/imt);
     }
@@ -7081,9 +7487,12 @@
       // mark this as a default, non-abstract method, since thats what it is. Also clear the
       // kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
       // methods that are skipping access checks.
+      // Also clear potential kAccSingleImplementation to avoid CHA trying to inline
+      // the default method.
       DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
       constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied;
-      constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
+      constexpr uint32_t kMaskFlags =
+          ~(kAccAbstract | kAccSkipAccessChecks | kAccSingleImplementation);
       new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
       DCHECK(new_method.IsDefaultConflicting());
       // The actual method might or might not be marked abstract since we just copied it from a
@@ -7317,7 +7726,7 @@
         auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
         MethodNameAndSignatureComparator interface_name_comparator(
             interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
-        uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+        uint32_t imt_index = interface_method->GetImtIndex();
         ArtMethod** imt_ptr = &out_imt[imt_index];
         // For each method listed in the interface's method list, find the
         // matching method in our class's method list.  We want to favor the
@@ -7776,7 +8185,7 @@
   if (descriptor[1] == '\0') {
     // only the descriptors of primitive types should be 1 character long, also avoid class lookup
     // for primitive classes that aren't backed by dex files.
-    type = FindPrimitiveClass(descriptor[0]);
+    type = LookupPrimitiveClass(descriptor[0]);
   } else {
     Thread* const self = Thread::Current();
     DCHECK(self != nullptr);
@@ -7794,14 +8203,22 @@
   return type;
 }
 
-ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
-                                                 ObjPtr<mirror::Class> referrer) {
+template <typename T>
+ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx, T referrer) {
   StackHandleScope<2> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
   return DoResolveType(type_idx, dex_cache, class_loader);
 }
 
+// Instantiate the above.
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+                                                          ArtField* referrer);
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+                                                          ArtMethod* referrer);
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+                                                          ObjPtr<mirror::Class> referrer);
+
 ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
                                                  Handle<mirror::DexCache> dex_cache,
                                                  Handle<mirror::ClassLoader> class_loader) {
@@ -7846,8 +8263,9 @@
   }
   DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
   if (resolved != nullptr &&
-      hiddenapi::GetMemberAction(
-          resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+      hiddenapi::ShouldDenyAccessToMember(resolved,
+                                          hiddenapi::AccessContext(class_loader, dex_cache),
+                                          hiddenapi::AccessMethod::kLinking)) {
     resolved = nullptr;
   }
   if (resolved != nullptr) {
@@ -7877,11 +8295,9 @@
                               ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_) {
   return method == nullptr ||
-         hiddenapi::GetMemberAction(method,
-                                    class_loader,
-                                    dex_cache,
-                                    hiddenapi::kNone)  // do not print warnings
-             == hiddenapi::kDeny;
+         hiddenapi::ShouldDenyAccessToMember(method,
+                                             hiddenapi::AccessContext(class_loader, dex_cache),
+                                             hiddenapi::AccessMethod::kNone);  // no warnings
 }
 
 ArtMethod* ClassLinker::FindIncompatibleMethod(ObjPtr<mirror::Class> klass,
@@ -7924,7 +8340,7 @@
     return resolved;
   }
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+  const dex::MethodId& method_id = dex_file.GetMethodId(method_idx);
   ObjPtr<mirror::Class> klass = nullptr;
   if (valid_dex_cache_method) {
     // We have a valid method from the DexCache but we need to perform ICCE and IAE checks.
@@ -7946,7 +8362,7 @@
 
   // Check if the invoke type matches the class type.
   if (kResolveMode == ResolveMode::kCheckICCEAndIAE &&
-      CheckInvokeClassMismatch</* kThrow */ true>(
+      CheckInvokeClassMismatch</* kThrow= */ true>(
           dex_cache.Get(), type, [klass]() { return klass; })) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
@@ -8005,7 +8421,7 @@
     return resolved;
   }
   // Fail, get the declaring class.
-  const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
+  const dex::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
   ObjPtr<mirror::Class> klass = ResolveType(method_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     Thread::Current()->AssertPendingException();
@@ -8017,8 +8433,10 @@
     resolved = klass->FindClassMethod(dex_cache.Get(), method_idx, image_pointer_size_);
   }
   if (resolved != nullptr &&
-      hiddenapi::GetMemberAction(
-          resolved, class_loader.Get(), dex_cache.Get(), hiddenapi::kLinking) == hiddenapi::kDeny) {
+      hiddenapi::ShouldDenyAccessToMember(
+          resolved,
+          hiddenapi::AccessContext(class_loader.Get(), dex_cache.Get()),
+          hiddenapi::AccessMethod::kLinking)) {
     resolved = nullptr;
   }
   return resolved;
@@ -8029,7 +8447,7 @@
                                            ObjPtr<mirror::ClassLoader> class_loader,
                                            bool is_static) {
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
   ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(field_id.class_idx_);
   if (klass == nullptr) {
     klass = LookupResolvedType(field_id.class_idx_, dex_cache, class_loader);
@@ -8054,7 +8472,7 @@
     return resolved;
   }
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
   ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
@@ -8080,7 +8498,7 @@
     return resolved;
   }
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
   ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
@@ -8109,7 +8527,7 @@
                        : klass->FindInstanceField(dex_cache, field_idx);
 
   if (resolved == nullptr) {
-    const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+    const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
     const char* name = dex_file.GetFieldName(field_id);
     const char* type = dex_file.GetFieldTypeDescriptor(field_id);
     resolved = is_static ? mirror::Class::FindStaticField(self, klass, name, type)
@@ -8117,8 +8535,9 @@
   }
 
   if (resolved != nullptr &&
-      hiddenapi::GetMemberAction(
-          resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+      hiddenapi::ShouldDenyAccessToMember(resolved,
+                                          hiddenapi::AccessContext(class_loader, dex_cache),
+                                          hiddenapi::AccessMethod::kLinking)) {
     resolved = nullptr;
   }
 
@@ -8136,15 +8555,16 @@
   ArtField* resolved = nullptr;
   Thread* self = Thread::Current();
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
 
   const char* name = dex_file.GetFieldName(field_id);
   const char* type = dex_file.GetFieldTypeDescriptor(field_id);
   resolved = mirror::Class::FindField(self, klass, name, type);
 
   if (resolved != nullptr &&
-      hiddenapi::GetMemberAction(
-          resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+      hiddenapi::ShouldDenyAccessToMember(resolved,
+                                          hiddenapi::AccessContext(class_loader, dex_cache),
+                                          hiddenapi::AccessMethod::kLinking)) {
     resolved = nullptr;
   }
 
@@ -8172,7 +8592,7 @@
 
   // First resolve the return type.
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::ProtoId& proto_id = dex_file.GetProtoId(proto_idx);
+  const dex::ProtoId& proto_id = dex_file.GetProtoId(proto_idx);
   Handle<mirror::Class> return_type(hs.NewHandle(
       ResolveType(proto_id.return_type_idx_, dex_cache, class_loader)));
   if (return_type == nullptr) {
@@ -8228,7 +8648,7 @@
 
 mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
     Thread* self,
-    const DexFile::MethodHandleItem& method_handle,
+    const dex::MethodHandleItem& method_handle,
     ArtMethod* referrer) {
   DexFile::MethodHandleType handle_type =
       static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_);
@@ -8305,7 +8725,7 @@
   switch (handle_type) {
     case DexFile::MethodHandleType::kStaticPut: {
       method_params->Set(0, target_field->ResolveType());
-      return_type = hs.NewHandle(FindPrimitiveClass('V'));
+      return_type = hs.NewHandle(GetClassRoot(ClassRoot::kPrimitiveVoid, this));
       break;
     }
     case DexFile::MethodHandleType::kStaticGet: {
@@ -8315,7 +8735,7 @@
     case DexFile::MethodHandleType::kInstancePut: {
       method_params->Set(0, target_field->GetDeclaringClass());
       method_params->Set(1, target_field->ResolveType());
-      return_type = hs.NewHandle(FindPrimitiveClass('V'));
+      return_type = hs.NewHandle(GetClassRoot(ClassRoot::kPrimitiveVoid, this));
       break;
     }
     case DexFile::MethodHandleType::kInstanceGet: {
@@ -8356,7 +8776,7 @@
 
 mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
     Thread* self,
-    const DexFile::MethodHandleItem& method_handle,
+    const dex::MethodHandleItem& method_handle,
     ArtMethod* referrer) {
   DexFile::MethodHandleType handle_type =
       static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_);
@@ -8469,7 +8889,7 @@
   target_method->GetShorty(&shorty_length);
   int32_t num_params = static_cast<int32_t>(shorty_length + receiver_count - 1);
 
-  StackHandleScope<7> hs(self);
+  StackHandleScope<5> hs(self);
   ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this);
   Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle(
       mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_params)));
@@ -8478,20 +8898,25 @@
     return nullptr;
   }
 
+  const DexFile* dex_file = referrer->GetDexFile();
+  const dex::MethodId& method_id = dex_file->GetMethodId(method_handle.field_or_method_idx_);
   int32_t index = 0;
   if (receiver_count != 0) {
-    // Insert receiver
-    method_params->Set(index++, target_method->GetDeclaringClass());
+    // Insert receiver. Use the class identified in the method handle rather than the declaring
+    // class of the resolved method, which may be a super class or a default interface method
+    // (b/115964401).
+    ObjPtr<mirror::Class> receiver_class = LookupResolvedType(method_id.class_idx_, referrer);
+    // receiver_class should have been resolved when resolving the target method.
+    DCHECK(receiver_class != nullptr);
+    method_params->Set(index++, receiver_class);
   }
-  DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
-  Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
-  Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
+
+  const dex::ProtoId& proto_id = dex_file->GetProtoId(method_id.proto_idx_);
+  DexFileParameterIterator it(*dex_file, proto_id);
   while (it.HasNext()) {
     DCHECK_LT(index, num_params);
     const dex::TypeIndex type_idx = it.GetTypeIdx();
-    ObjPtr<mirror::Class> klass = ResolveType(type_idx,
-                                              target_method_dex_cache,
-                                              target_method_class_loader);
+    ObjPtr<mirror::Class> klass = ResolveType(type_idx, referrer);
     if (nullptr == klass) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
@@ -8500,7 +8925,8 @@
     it.Next();
   }
 
-  Handle<mirror::Class> return_type = hs.NewHandle(target_method->ResolveReturnType());
+  Handle<mirror::Class> return_type =
+      hs.NewHandle(ResolveType(proto_id.return_type_idx_, referrer));
   if (UNLIKELY(return_type.IsNull())) {
     DCHECK(self->IsExceptionPending());
     return nullptr;
@@ -8529,7 +8955,7 @@
                                                               ArtMethod* referrer)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile* const dex_file = referrer->GetDexFile();
-  const DexFile::MethodHandleItem& method_handle = dex_file->GetMethodHandle(method_handle_idx);
+  const dex::MethodHandleItem& method_handle = dex_file->GetMethodHandle(method_handle_idx);
   switch (static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_)) {
     case DexFile::MethodHandleType::kStaticPut:
     case DexFile::MethodHandleType::kStaticGet:
@@ -8590,6 +9016,49 @@
   ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
   os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
      << NumNonZygoteClasses() << "\n";
+  ReaderMutexLock mu2(soa.Self(), *Locks::dex_lock_);
+  os << "Dumping registered class loaders\n";
+  size_t class_loader_index = 0;
+  for (const ClassLoaderData& class_loader : class_loaders_) {
+    ObjPtr<mirror::ClassLoader> loader =
+        ObjPtr<mirror::ClassLoader>::DownCast(soa.Self()->DecodeJObject(class_loader.weak_root));
+    if (loader != nullptr) {
+      os << "#" << class_loader_index++ << " " << loader->GetClass()->PrettyDescriptor() << ": [";
+      bool saw_one_dex_file = false;
+      for (const DexCacheData& dex_cache : dex_caches_) {
+        if (dex_cache.IsValid() && dex_cache.class_table == class_loader.class_table) {
+          if (saw_one_dex_file) {
+            os << ":";
+          }
+          saw_one_dex_file = true;
+          os << dex_cache.dex_file->GetLocation();
+        }
+      }
+      os << "]";
+      bool found_parent = false;
+      if (loader->GetParent() != nullptr) {
+        size_t parent_index = 0;
+        for (const ClassLoaderData& class_loader2 : class_loaders_) {
+          ObjPtr<mirror::ClassLoader> loader2 = ObjPtr<mirror::ClassLoader>::DownCast(
+              soa.Self()->DecodeJObject(class_loader2.weak_root));
+          if (loader2 == loader->GetParent()) {
+            os << ", parent #" << parent_index;
+            found_parent = true;
+            break;
+          }
+          parent_index++;
+        }
+        if (!found_parent) {
+          os << ", unregistered parent of type "
+             << loader->GetParent()->GetClass()->PrettyDescriptor();
+        }
+      } else {
+        os << ", no parent";
+      }
+      os << "\n";
+    }
+  }
+  os << "Done dumping class loaders\n";
 }
 
 class CountClassesVisitor : public ClassLoaderVisitor {
@@ -8662,21 +9131,14 @@
   CheckSystemClass(self, primitive_array_class, descriptor);
 }
 
-jobject ClassLinker::CreateWellKnownClassLoader(Thread* self,
-                                                const std::vector<const DexFile*>& dex_files,
-                                                jclass loader_class,
-                                                jobject parent_loader) {
-  CHECK(self->GetJniEnv()->IsSameObject(loader_class,
-                                        WellKnownClasses::dalvik_system_PathClassLoader) ||
-        self->GetJniEnv()->IsSameObject(loader_class,
-                                        WellKnownClasses::dalvik_system_DelegateLastClassLoader));
+ObjPtr<mirror::ClassLoader> ClassLinker::CreateWellKnownClassLoader(
+    Thread* self,
+    const std::vector<const DexFile*>& dex_files,
+    Handle<mirror::Class> loader_class,
+    Handle<mirror::ClassLoader> parent_loader,
+    Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries) {
 
-  // SOAAlreadyRunnable is protected, and we need something to add a global reference.
-  // We could move the jobject to the callers, but all call-sites do this...
-  ScopedObjectAccessUnchecked soa(self);
-
-  // For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
-  StackHandleScope<6> hs(self);
+  StackHandleScope<5> hs(self);
 
   ArtField* dex_elements_field =
       jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
@@ -8712,7 +9174,7 @@
         self,
         kDexFileIndexStart + 1));
     DCHECK(h_long_array != nullptr);
-    h_long_array->Set(kDexFileIndexStart, reinterpret_cast<intptr_t>(dex_file));
+    h_long_array->Set(kDexFileIndexStart, reinterpret_cast64<int64_t>(dex_file));
 
     // Note that this creates a finalizable dalvik.system.DexFile object and a corresponding
     // FinalizerReference which will never get cleaned up without a started runtime.
@@ -8766,8 +9228,8 @@
   }
 
   // Create the class loader..
-  Handle<mirror::Class> h_loader_class = hs.NewHandle(soa.Decode<mirror::Class>(loader_class));
-  Handle<mirror::Object> h_class_loader = hs.NewHandle(h_loader_class->AllocObject(self));
+  Handle<mirror::ClassLoader> h_class_loader = hs.NewHandle<mirror::ClassLoader>(
+      ObjPtr<mirror::ClassLoader>::DownCast(loader_class->AllocObject(self)));
   DCHECK(h_class_loader != nullptr);
   // Set DexPathList.
   ArtField* path_list_field =
@@ -8783,15 +9245,57 @@
                                "parent",
                                "Ljava/lang/ClassLoader;");
   DCHECK(parent_field != nullptr);
+  if (parent_loader.Get() == nullptr) {
+    ScopedObjectAccessUnchecked soa(self);
+    ObjPtr<mirror::Object> boot_loader(soa.Decode<mirror::Class>(
+        WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self));
+    parent_field->SetObject<false>(h_class_loader.Get(), boot_loader);
+  } else {
+    parent_field->SetObject<false>(h_class_loader.Get(), parent_loader.Get());
+  }
 
-  ObjPtr<mirror::Object> parent = (parent_loader != nullptr)
-      ? soa.Decode<mirror::ClassLoader>(parent_loader)
-      : soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self);
-  parent_field->SetObject<false>(h_class_loader.Get(), parent);
+  ArtField* shared_libraries_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  DCHECK(shared_libraries_field != nullptr);
+  shared_libraries_field->SetObject<false>(h_class_loader.Get(), shared_libraries.Get());
+
+  return h_class_loader.Get();
+}
+
+jobject ClassLinker::CreateWellKnownClassLoader(Thread* self,
+                                                const std::vector<const DexFile*>& dex_files,
+                                                jclass loader_class,
+                                                jobject parent_loader,
+                                                jobject shared_libraries) {
+  CHECK(self->GetJniEnv()->IsSameObject(loader_class,
+                                        WellKnownClasses::dalvik_system_PathClassLoader) ||
+        self->GetJniEnv()->IsSameObject(loader_class,
+                                        WellKnownClasses::dalvik_system_DelegateLastClassLoader));
+
+  // SOAAlreadyRunnable is protected, and we need something to add a global reference.
+  // We could move the jobject to the callers, but all call-sites do this...
+  ScopedObjectAccessUnchecked soa(self);
+
+  // For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
+  StackHandleScope<4> hs(self);
+
+  Handle<mirror::Class> h_loader_class =
+      hs.NewHandle<mirror::Class>(soa.Decode<mirror::Class>(loader_class));
+  Handle<mirror::ClassLoader> h_parent =
+      hs.NewHandle<mirror::ClassLoader>(soa.Decode<mirror::ClassLoader>(parent_loader));
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> h_shared_libraries =
+      hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::ClassLoader>>(shared_libraries));
+
+  ObjPtr<mirror::ClassLoader> loader = CreateWellKnownClassLoader(
+      self,
+      dex_files,
+      h_loader_class,
+      h_parent,
+      h_shared_libraries);
 
   // Make it a global ref and return.
   ScopedLocalRef<jobject> local_ref(
-      soa.Env(), soa.Env()->AddLocalReference<jobject>(h_class_loader.Get()));
+      soa.Env(), soa.Env()->AddLocalReference<jobject>(loader));
   return soa.Env()->NewGlobalRef(local_ref.get());
 }
 
@@ -8865,7 +9369,7 @@
   }
   for (ClassLoaderData& data : to_delete) {
     // CHA unloading analysis and SingleImplementaion cleanups are required.
-    DeleteClassLoader(self, data, true /*cleanup_cha*/);
+    DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
   }
 }
 
@@ -9011,11 +9515,11 @@
     InvokeType type);
 
 // Instantiate ClassLinker::AllocClass.
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ true>(
     Thread* self,
     ObjPtr<mirror::Class> java_lang_Class,
     uint32_t class_size);
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ false>(
     Thread* self,
     ObjPtr<mirror::Class> java_lang_Class,
     uint32_t class_size);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index efe29d3..4f4cb4b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_CLASS_LINKER_H_
 #define ART_RUNTIME_CLASS_LINKER_H_
 
+#include <list>
 #include <set>
 #include <string>
 #include <unordered_map>
@@ -25,11 +26,10 @@
 #include <vector>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/class_accessor.h"
 #include "dex/dex_cache_resolved_classes.h"
-#include "dex/dex_file.h"
 #include "dex/dex_file_types.h"
 #include "gc_root.h"
 #include "handle.h"
@@ -39,6 +39,11 @@
 
 namespace art {
 
+namespace dex {
+struct ClassDef;
+struct MethodHandleItem;
+}  // namespace dex
+
 namespace gc {
 namespace space {
 class ImageSpace;
@@ -73,6 +78,7 @@
 class ClassHierarchyAnalysis;
 enum class ClassRoot : uint32_t;
 class ClassTable;
+class DexFile;
 template<class T> class Handle;
 class ImtConflictTable;
 template<typename T> class LengthPrefixedArray;
@@ -84,6 +90,7 @@
 class Runtime;
 class ScopedObjectAccessAlreadyRunnable;
 template<size_t kNumReferences> class PACKED(4) StackHandleScope;
+class Thread;
 
 enum VisitRootFlags : uint8_t;
 
@@ -111,7 +118,7 @@
 
 class ClassLinker {
  public:
-  static constexpr bool kAppImageMayContainStrings = false;
+  static constexpr bool kAppImageMayContainStrings = true;
 
   explicit ClassLinker(InternTable* intern_table);
   virtual ~ClassLinker();
@@ -127,6 +134,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
+  // Add boot class path dex files that were not included in the boot image.
+  // ClassLinker takes ownership of these dex files.
+  void AddExtraBootDexFiles(Thread* self,
+                            std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Add an image space to the class linker, may fix up classloader fields and dex cache fields.
   // The dex files that were newly opened for the space are placed in the out argument
   // out_dex_files. Returns true if the operation succeeded.
@@ -179,7 +192,7 @@
                                     size_t hash,
                                     Handle<mirror::ClassLoader> class_loader,
                                     const DexFile& dex_file,
-                                    const DexFile::ClassDef& dex_class_def)
+                                    const dex::ClassDef& dex_class_def)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -196,6 +209,7 @@
       REQUIRES(!Locks::classlinker_classes_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ObjPtr<mirror::Class> LookupPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_);
   ObjPtr<mirror::Class> FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os) REQUIRES(!Locks::classlinker_classes_lock_);
@@ -554,12 +568,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ObjPtr<mirror::ObjectArray<mirror::Class>> GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
-        class_roots_.Read<kReadBarrierOption>();
-    DCHECK(class_roots != nullptr);
-    return class_roots;
-  }
+  ObjPtr<mirror::ObjectArray<mirror::Class>> GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
   // that no more classes are ever added to the pre zygote table which makes it that the pages
@@ -578,7 +587,8 @@
   jobject CreateWellKnownClassLoader(Thread* self,
                                      const std::vector<const DexFile*>& dex_files,
                                      jclass loader_class,
-                                     jobject parent_loader)
+                                     jobject parent_loader,
+                                     jobject shared_libraries = nullptr)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -590,6 +600,16 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
+  // Non-GlobalRef version of CreateWellKnownClassLoader
+  ObjPtr<mirror::ClassLoader> CreateWellKnownClassLoader(
+      Thread* self,
+      const std::vector<const DexFile*>& dex_files,
+      Handle<mirror::Class> loader_class,
+      Handle<mirror::ClassLoader> parent_loader,
+      Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries)
+          REQUIRES_SHARED(Locks::mutator_lock_)
+          REQUIRES(!Locks::dex_lock_);
+
   PointerSize GetImagePointerSize() const {
     return image_pointer_size_;
   }
@@ -667,7 +687,9 @@
 
   // Throw the class initialization failure recorded when first trying to initialize the given
   // class.
-  void ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def = false)
+  void ThrowEarlierClassFailure(ObjPtr<mirror::Class> c,
+                                bool wrap_in_no_class_def = false,
+                                bool log = false)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -831,19 +853,19 @@
   // Precomputes size needed for Class, in the case of a non-temporary class this size must be
   // sufficient to hold all static fields.
   uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
-                                            const DexFile::ClassDef& dex_class_def);
+                                            const dex::ClassDef& dex_class_def);
 
   // Setup the classloader, class def index, type idx so that we can insert this class in the class
   // table.
   void SetupClass(const DexFile& dex_file,
-                  const DexFile::ClassDef& dex_class_def,
+                  const dex::ClassDef& dex_class_def,
                   Handle<mirror::Class> klass,
                   ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void LoadClass(Thread* self,
                  const DexFile& dex_file,
-                 const DexFile::ClassDef& dex_class_def,
+                 const dex::ClassDef& dex_class_def,
                  Handle<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -872,6 +894,15 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
+  bool FindClassInSharedLibraries(ScopedObjectAccessAlreadyRunnable& soa,
+                                  Thread* self,
+                                  const char* descriptor,
+                                  size_t hash,
+                                  Handle<mirror::ClassLoader> class_loader,
+                                  /*out*/ ObjPtr<mirror::Class>* result)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::dex_lock_);
+
   // Finds the class in the classpath of the given class loader. It only searches the class loader
   // dex files and does not recurse into its parent.
   // The method checks that the provided class loader is either a PathClassLoader or a
@@ -916,8 +947,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Implementation of ResolveType() called when the type was not found in the dex cache.
-  ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx,
-                                      ObjPtr<mirror::Class> referrer)
+  template <typename T>
+  ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx, T referrer)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
   ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx,
@@ -1006,12 +1037,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::MethodHandle* ResolveMethodHandleForField(Thread* self,
-                                                    const DexFile::MethodHandleItem& method_handle,
+                                                    const dex::MethodHandleItem& method_handle,
                                                     ArtMethod* referrer)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
-                                                     const DexFile::MethodHandleItem& method_handle,
+                                                     const dex::MethodHandleItem& method_handle,
                                                      ArtMethod* referrer)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1038,12 +1069,12 @@
    public:
     // This slot must become a default conflict method.
     static MethodTranslation CreateConflictingMethod() {
-      return MethodTranslation(Type::kConflict, /*translation*/nullptr);
+      return MethodTranslation(Type::kConflict, /*translation=*/nullptr);
     }
 
     // This slot must become an abstract method.
     static MethodTranslation CreateAbstractMethod() {
-      return MethodTranslation(Type::kAbstract, /*translation*/nullptr);
+      return MethodTranslation(Type::kAbstract, /*translation=*/nullptr);
     }
 
     // Use the given method as the current value for this vtable slot during translation.
@@ -1352,7 +1383,7 @@
 
   class FindVirtualMethodHolderVisitor;
 
-  friend class AppImageClassLoadersAndDexCachesHelper;
+  friend class AppImageLoadingHelper;
   friend class ImageDumper;  // for DexLock
   friend struct linker::CompilationHelper;  // For Compile in ImageTest.
   friend class linker::ImageWriter;  // for GetClassRoots
@@ -1381,9 +1412,9 @@
                               Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
                               Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
                               const DexFile& initial_dex_file ATTRIBUTE_UNUSED,
-                              const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+                              const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                               /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
-                              /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
+                              /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
       REQUIRES_SHARED(Locks::mutator_lock_) {}
 
   // A class has been loaded.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab7182a..2f37123 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -28,11 +28,13 @@
 #include "class_root.h"
 #include "common_runtime_test.h"
 #include "dex/dex_file_types.h"
+#include "dex/signature-inl.h"
 #include "dex/standard_dex_file.h"
 #include "entrypoints/entrypoint_utils-inl.h"
 #include "experimental_flags.h"
 #include "gc/heap.h"
 #include "handle_scope-inl.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/accessible_object.h"
 #include "mirror/call_site.h"
 #include "mirror/class-inl.h"
@@ -45,6 +47,7 @@
 #include "mirror/method_handles_lookup.h"
 #include "mirror/method_type.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/proxy.h"
 #include "mirror/reference.h"
@@ -112,7 +115,8 @@
     EXPECT_EQ(0, primitive->GetIfTableCount());
     EXPECT_TRUE(primitive->GetIfTable() != nullptr);
     EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
-    EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
+    EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted,
+              primitive->GetAccessFlags());
   }
 
   void AssertObjectClass(ObjPtr<mirror::Class> JavaLangObject)
@@ -426,13 +430,13 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Verify all the classes defined in this file
     for (size_t i = 0; i < dex.NumClassDefs(); i++) {
-      const DexFile::ClassDef& class_def = dex.GetClassDef(i);
+      const dex::ClassDef& class_def = dex.GetClassDef(i);
       const char* descriptor = dex.GetClassDescriptor(class_def);
       AssertDexFileClass(class_loader, descriptor);
     }
     // Verify all the types referenced by this file
     for (size_t i = 0; i < dex.NumTypeIds(); i++) {
-      const DexFile::TypeId& type_id = dex.GetTypeId(dex::TypeIndex(i));
+      const dex::TypeId& type_id = dex.GetTypeId(dex::TypeIndex(i));
       const char* descriptor = dex.GetTypeDescriptor(type_id);
       AssertDexFileClass(class_loader, descriptor);
     }
@@ -609,6 +613,10 @@
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_dex_caches_), "obsoleteDexCaches");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_methods_), "obsoleteMethods");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_), "originalDexFile");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, pre_redefine_class_def_index_),
+              "preRedefineClassDefIndex");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, pre_redefine_dex_file_ptr_),
+              "preRedefineDexFilePtr");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
   }
 };
@@ -660,12 +668,14 @@
   DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
+    addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_preresolved_strings_), "numPreResolvedStrings");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_call_sites_), "numResolvedCallSites");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_fields_), "numResolvedFields");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_method_types_), "numResolvedMethodTypes");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_methods_), "numResolvedMethods");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_types_), "numResolvedTypes");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_strings_), "numStrings");
+    addOffset(OFFSETOF_MEMBER(mirror::DexCache, preresolved_strings_), "preResolvedStrings");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_call_sites_), "resolvedCallSites");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_method_types_), "resolvedMethodTypes");
@@ -988,7 +998,7 @@
   Handle<mirror::DexCache> dex_cache = hs.NewHandle(all_fields_klass->GetDexCache());
   const DexFile& dex_file = *dex_cache->GetDexFile();
   // Get the index of the array class we want to test.
-  const DexFile::TypeId* array_id = dex_file.FindTypeId("[Ljava/lang/Object;");
+  const dex::TypeId* array_id = dex_file.FindTypeId("[Ljava/lang/Object;");
   ASSERT_TRUE(array_id != nullptr);
   dex::TypeIndex array_idx = dex_file.GetIndexForTypeId(*array_id);
   // Check that the array class wasn't resolved yet.
@@ -1034,8 +1044,8 @@
   // Force initialization to turn the class erroneous.
   bool initialized = class_linker_->EnsureInitialized(soa.Self(),
                                                       klass,
-                                                      /* can_init_fields */ true,
-                                                      /* can_init_parents */ true);
+                                                      /* can_init_fields= */ true,
+                                                      /* can_init_parents= */ true);
   EXPECT_FALSE(initialized);
   EXPECT_TRUE(soa.Self()->IsExceptionPending());
   soa.Self()->ClearException();
@@ -1314,21 +1324,21 @@
       klass->FindClassMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
   ASSERT_TRUE(getS0 != nullptr);
   ASSERT_TRUE(getS0->IsStatic());
-  const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
+  const dex::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
   ASSERT_TRUE(type_id != nullptr);
   dex::TypeIndex type_idx = dex_file->GetIndexForTypeId(*type_id);
   ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx,
                                                         clinit,
                                                         soa.Self(),
-                                                        /* can_run_clinit */ true,
-                                                        /* verify_access */ false);
+                                                        /* can_run_clinit= */ true,
+                                                        /* verify_access= */ false);
   EXPECT_TRUE(uninit != nullptr);
   EXPECT_FALSE(uninit->IsInitialized());
   ObjPtr<mirror::Class> init = ResolveVerifyAndClinit(type_idx,
                                                       getS0,
                                                       soa.Self(),
-                                                      /* can_run_clinit */ true,
-                                                      /* verify_access */ false);
+                                                      /* can_run_clinit= */ true,
+                                                      /* verify_access= */ false);
   EXPECT_TRUE(init != nullptr);
   EXPECT_TRUE(init->IsInitialized());
 }
@@ -1530,7 +1540,7 @@
   {
     WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
     // Check that inserting with a UTF16 name works.
-    class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
+    class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader= */ nullptr);
   }
 }
 
@@ -1555,7 +1565,7 @@
   Handle<mirror::DexCache> dex_cache = hs.NewHandle(
       class_linker_->FindDexCache(soa.Self(), dex_file));
 
-  const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+  const dex::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
 
   // This is the MethodType corresponding to the prototype of
   // String MethodTypes# method1(String).
@@ -1587,7 +1597,7 @@
       kRuntimePointerSize);
   ASSERT_TRUE(method2 != nullptr);
   ASSERT_FALSE(method2->IsDirect());
-  const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+  const dex::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
   Handle<mirror::MethodType> method2_type = hs.NewHandle(
       class_linker_->ResolveMethodType(soa.Self(), method2_id.proto_idx_, dex_cache, class_loader));
   ASSERT_OBJ_PTR_NE(method1_type.Get(), method2_type.Get());
@@ -1699,14 +1709,14 @@
   jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
   VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
   VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
-  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
 }
 
 TEST_F(ClassLinkerClassLoaderTest, CreateDelegateLastClassLoader) {
   jobject class_loader_a = LoadDexInDelegateLastClassLoader("ForClassLoaderA", nullptr);
   VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
   VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
-  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
 }
 
 TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) {
@@ -1753,7 +1763,7 @@
   VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a);
 
   // Sanity check that we don't find an undefined class.
-  VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false);
 }
 
 }  // namespace art
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 2bd5411..c5988f6 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -16,16 +16,23 @@
 
 #include "class_loader_context.h"
 
+#include <android-base/parseint.h>
+#include <android-base/strings.h>
+
 #include "art_field-inl.h"
+#include "base/casts.h"
 #include "base/dchecked_vector.h"
 #include "base/stl_util.h"
 #include "class_linker.h"
 #include "class_loader_utils.h"
+#include "class_root.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
 #include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "nativehelper/scoped_local_ref.h"
 #include "oat_file_assistant.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
@@ -39,6 +46,9 @@
 static constexpr char kDelegateLastClassLoaderString[] = "DLC";
 static constexpr char kClassLoaderOpeningMark = '[';
 static constexpr char kClassLoaderClosingMark = ']';
+static constexpr char kClassLoaderSharedLibraryOpeningMark = '{';
+static constexpr char kClassLoaderSharedLibraryClosingMark = '}';
+static constexpr char kClassLoaderSharedLibrarySeparator = '#';
 static constexpr char kClassLoaderSeparator = ';';
 static constexpr char kClasspathSeparator = ':';
 static constexpr char kDexFileChecksumSeparator = '*';
@@ -55,17 +65,35 @@
       dex_files_open_result_(true),
       owns_the_dex_files_(owns_the_dex_files) {}
 
+// Utility method to add parent and shared libraries of `info` into
+// the `work_list`.
+static void AddToWorkList(
+    ClassLoaderContext::ClassLoaderInfo* info,
+    std::vector<ClassLoaderContext::ClassLoaderInfo*>& work_list) {
+  if (info->parent != nullptr) {
+    work_list.push_back(info->parent.get());
+  }
+  for (size_t i = 0; i < info->shared_libraries.size(); ++i) {
+    work_list.push_back(info->shared_libraries[i].get());
+  }
+}
+
 ClassLoaderContext::~ClassLoaderContext() {
-  if (!owns_the_dex_files_) {
+  if (!owns_the_dex_files_ && class_loader_chain_ != nullptr) {
     // If the context does not own the dex/oat files release the unique pointers to
     // make sure we do not de-allocate them.
-    for (ClassLoaderInfo& info : class_loader_chain_) {
-      for (std::unique_ptr<OatFile>& oat_file : info.opened_oat_files) {
-        oat_file.release();
+    std::vector<ClassLoaderInfo*> work_list;
+    work_list.push_back(class_loader_chain_.get());
+    while (!work_list.empty()) {
+      ClassLoaderInfo* info = work_list.back();
+      work_list.pop_back();
+      for (std::unique_ptr<OatFile>& oat_file : info->opened_oat_files) {
+        oat_file.release();  // NOLINT b/117926937
       }
-      for (std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
-        dex_file.release();
+      for (std::unique_ptr<const DexFile>& dex_file : info->opened_dex_files) {
+        dex_file.release();  // NOLINT b/117926937
       }
+      AddToWorkList(info, work_list);
     }
   }
 }
@@ -83,11 +111,49 @@
   }
 }
 
-// The expected format is: "ClassLoaderType1[ClasspathElem1*Checksum1:ClasspathElem2*Checksum2...]".
+static size_t FindMatchingSharedLibraryCloseMarker(const std::string& spec,
+                                                   size_t shared_library_open_index) {
+  // Counter of opened shared library marker we've encountered so far.
+  uint32_t counter = 1;
+  // The index at which we're operating in the loop.
+  uint32_t string_index = shared_library_open_index + 1;
+  size_t shared_library_close = std::string::npos;
+  while (counter != 0) {
+    shared_library_close =
+        spec.find_first_of(kClassLoaderSharedLibraryClosingMark, string_index);
+    size_t shared_library_open =
+        spec.find_first_of(kClassLoaderSharedLibraryOpeningMark, string_index);
+    if (shared_library_close == std::string::npos) {
+      // No matching closing marker. Return an error.
+      break;
+    }
+
+    if ((shared_library_open == std::string::npos) ||
+        (shared_library_close < shared_library_open)) {
+      // We have seen a closing marker. Decrement the counter.
+      --counter;
+      // Move the search index forward.
+      string_index = shared_library_close + 1;
+    } else {
+      // New nested opening marker. Increment the counter and move the search
+      // index after the marker.
+      ++counter;
+      string_index = shared_library_open + 1;
+    }
+  }
+  return shared_library_close;
+}
+
+// The expected format is:
+// "ClassLoaderType1[ClasspathElem1*Checksum1:ClasspathElem2*Checksum2...]{ClassLoaderType2[...]}".
 // The checksum part of the format is expected only if parse_cheksums is true.
-bool ClassLoaderContext::ParseClassLoaderSpec(const std::string& class_loader_spec,
-                                              ClassLoaderType class_loader_type,
-                                              bool parse_checksums) {
+std::unique_ptr<ClassLoaderContext::ClassLoaderInfo> ClassLoaderContext::ParseClassLoaderSpec(
+    const std::string& class_loader_spec,
+    bool parse_checksums) {
+  ClassLoaderType class_loader_type = ExtractClassLoaderType(class_loader_spec);
+  if (class_loader_type == kInvalidClassLoader) {
+    return nullptr;
+  }
   const char* class_loader_type_str = GetClassLoaderTypeName(class_loader_type);
   size_t type_str_size = strlen(class_loader_type_str);
 
@@ -95,21 +161,24 @@
 
   // Check the opening and closing markers.
   if (class_loader_spec[type_str_size] != kClassLoaderOpeningMark) {
-    return false;
+    return nullptr;
   }
-  if (class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderClosingMark) {
-    return false;
+  if ((class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderClosingMark) &&
+      (class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderSharedLibraryClosingMark)) {
+    return nullptr;
   }
 
+  size_t closing_index = class_loader_spec.find_first_of(kClassLoaderClosingMark);
+
   // At this point we know the format is ok; continue and extract the classpath.
   // Note that class loaders with an empty class path are allowed.
   std::string classpath = class_loader_spec.substr(type_str_size + 1,
-                                                   class_loader_spec.length() - type_str_size - 2);
+                                                   closing_index - type_str_size - 1);
 
-  class_loader_chain_.push_back(ClassLoaderInfo(class_loader_type));
+  std::unique_ptr<ClassLoaderInfo> info(new ClassLoaderInfo(class_loader_type));
 
   if (!parse_checksums) {
-    Split(classpath, kClasspathSeparator, &class_loader_chain_.back().classpath);
+    Split(classpath, kClasspathSeparator, &info->classpath);
   } else {
     std::vector<std::string> classpath_elements;
     Split(classpath, kClasspathSeparator, &classpath_elements);
@@ -117,18 +186,74 @@
       std::vector<std::string> dex_file_with_checksum;
       Split(element, kDexFileChecksumSeparator, &dex_file_with_checksum);
       if (dex_file_with_checksum.size() != 2) {
-        return false;
+        return nullptr;
       }
       uint32_t checksum = 0;
-      if (!ParseInt(dex_file_with_checksum[1].c_str(), &checksum)) {
-        return false;
+      if (!android::base::ParseUint(dex_file_with_checksum[1].c_str(), &checksum)) {
+        return nullptr;
       }
-      class_loader_chain_.back().classpath.push_back(dex_file_with_checksum[0]);
-      class_loader_chain_.back().checksums.push_back(checksum);
+      info->classpath.push_back(dex_file_with_checksum[0]);
+      info->checksums.push_back(checksum);
     }
   }
 
-  return true;
+  if ((class_loader_spec[class_loader_spec.length() - 1] == kClassLoaderSharedLibraryClosingMark) &&
+      (class_loader_spec[class_loader_spec.length() - 2] != kClassLoaderSharedLibraryOpeningMark)) {
+    // Non-empty list of shared libraries.
+    size_t start_index = class_loader_spec.find_first_of(kClassLoaderSharedLibraryOpeningMark);
+    if (start_index == std::string::npos) {
+      return nullptr;
+    }
+    std::string shared_libraries_spec =
+        class_loader_spec.substr(start_index + 1, class_loader_spec.length() - start_index - 2);
+    std::vector<std::string> shared_libraries;
+    size_t cursor = 0;
+    while (cursor != shared_libraries_spec.length()) {
+      size_t shared_library_separator =
+          shared_libraries_spec.find_first_of(kClassLoaderSharedLibrarySeparator, cursor);
+      size_t shared_library_open =
+          shared_libraries_spec.find_first_of(kClassLoaderSharedLibraryOpeningMark, cursor);
+      std::string shared_library_spec;
+      if (shared_library_separator == std::string::npos) {
+        // Only one shared library, for example:
+        // PCL[...]
+        shared_library_spec =
+            shared_libraries_spec.substr(cursor, shared_libraries_spec.length() - cursor);
+        cursor = shared_libraries_spec.length();
+      } else if ((shared_library_open == std::string::npos) ||
+                 (shared_library_open > shared_library_separator)) {
+        // We found a shared library without nested shared libraries, for example:
+        // PCL[...]#PCL[...]{...}
+        shared_library_spec =
+            shared_libraries_spec.substr(cursor, shared_library_separator - cursor);
+        cursor = shared_library_separator + 1;
+      } else {
+        // The shared library contains nested shared libraries. Find the matching closing shared
+        // marker for it.
+        size_t closing_marker =
+            FindMatchingSharedLibraryCloseMarker(shared_libraries_spec, shared_library_open);
+        if (closing_marker == std::string::npos) {
+          // No matching closing marker, return an error.
+          return nullptr;
+        }
+        shared_library_spec = shared_libraries_spec.substr(cursor, closing_marker + 1 - cursor);
+        cursor = closing_marker + 1;
+        if (cursor != shared_libraries_spec.length() &&
+            shared_libraries_spec[cursor] == kClassLoaderSharedLibrarySeparator) {
+          // Pass the shared library separator marker.
+          ++cursor;
+        }
+      }
+      std::unique_ptr<ClassLoaderInfo> shared_library(
+          ParseInternal(shared_library_spec, parse_checksums));
+      if (shared_library == nullptr) {
+        return nullptr;
+      }
+      info->shared_libraries.push_back(std::move(shared_library));
+    }
+  }
+
+  return info;
 }
 
 // Extracts the class loader type from the given spec.
@@ -154,7 +279,7 @@
     // By default we load the dex files in a PathClassLoader.
     // So an empty spec is equivalent to an empty PathClassLoader (this happens when running
     // tests)
-    class_loader_chain_.push_back(ClassLoaderInfo(kPathClassLoader));
+    class_loader_chain_.reset(new ClassLoaderInfo(kPathClassLoader));
     return true;
   }
 
@@ -166,21 +291,76 @@
     return true;
   }
 
-  std::vector<std::string> class_loaders;
-  Split(spec, kClassLoaderSeparator, &class_loaders);
+  CHECK(class_loader_chain_ == nullptr);
+  class_loader_chain_.reset(ParseInternal(spec, parse_checksums));
+  return class_loader_chain_ != nullptr;
+}
 
-  for (const std::string& class_loader : class_loaders) {
-    ClassLoaderType type = ExtractClassLoaderType(class_loader);
-    if (type == kInvalidClassLoader) {
-      LOG(ERROR) << "Invalid class loader type: " << class_loader;
-      return false;
+ClassLoaderContext::ClassLoaderInfo* ClassLoaderContext::ParseInternal(
+    const std::string& spec, bool parse_checksums) {
+  CHECK(!spec.empty());
+  CHECK_NE(spec, OatFile::kSpecialSharedLibrary);
+  std::string remaining = spec;
+  std::unique_ptr<ClassLoaderInfo> first(nullptr);
+  ClassLoaderInfo* previous_iteration = nullptr;
+  while (!remaining.empty()) {
+    std::string class_loader_spec;
+    size_t first_class_loader_separator = remaining.find_first_of(kClassLoaderSeparator);
+    size_t first_shared_library_open =
+        remaining.find_first_of(kClassLoaderSharedLibraryOpeningMark);
+    if (first_class_loader_separator == std::string::npos) {
+      // Only one class loader, for example:
+      // PCL[...]
+      class_loader_spec = remaining;
+      remaining = "";
+    } else if ((first_shared_library_open == std::string::npos) ||
+               (first_shared_library_open > first_class_loader_separator)) {
+      // We found a class loader spec without shared libraries, for example:
+      // PCL[...];PCL[...]{...}
+      class_loader_spec = remaining.substr(0, first_class_loader_separator);
+      remaining = remaining.substr(first_class_loader_separator + 1,
+                                   remaining.size() - first_class_loader_separator - 1);
+    } else {
+      // The class loader spec contains shared libraries. Find the matching closing
+      // shared library marker for it.
+
+      size_t shared_library_close =
+          FindMatchingSharedLibraryCloseMarker(remaining, first_shared_library_open);
+      if (shared_library_close == std::string::npos) {
+        LOG(ERROR) << "Invalid class loader spec: " << remaining;
+        return nullptr;
+      }
+      class_loader_spec = remaining.substr(0, shared_library_close + 1);
+
+      // Compute the remaining string to analyze.
+      if (remaining.size() == shared_library_close + 1) {
+        remaining = "";
+      } else if ((remaining.size() == shared_library_close + 2) ||
+                 (remaining.at(shared_library_close + 1) != kClassLoaderSeparator)) {
+        LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
+        return nullptr;
+      } else {
+        remaining = remaining.substr(shared_library_close + 2,
+                                     remaining.size() - shared_library_close - 2);
+      }
     }
-    if (!ParseClassLoaderSpec(class_loader, type, parse_checksums)) {
-      LOG(ERROR) << "Invalid class loader spec: " << class_loader;
-      return false;
+
+    std::unique_ptr<ClassLoaderInfo> info =
+        ParseClassLoaderSpec(class_loader_spec, parse_checksums);
+    if (info == nullptr) {
+      LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
+      return nullptr;
+    }
+    if (first == nullptr) {
+      first = std::move(info);
+      previous_iteration = first.get();
+    } else {
+      CHECK(previous_iteration != nullptr);
+      previous_iteration->parent = std::move(info);
+      previous_iteration = previous_iteration->parent.get();
     }
   }
-  return true;
+  return first.release();
 }
 
 // Opens requested class path files and appends them to opened_dex_files. If the dex files have
@@ -205,9 +385,14 @@
   // TODO(calin): Refine the dex opening interface to be able to tell if an archive contains
   // no dex files. So that we can distinguish the real failures...
   const ArtDexFileLoader dex_file_loader;
-  for (ClassLoaderInfo& info : class_loader_chain_) {
-    size_t opened_dex_files_index = info.opened_dex_files.size();
-    for (const std::string& cp_elem : info.classpath) {
+  std::vector<ClassLoaderInfo*> work_list;
+  CHECK(class_loader_chain_ != nullptr);
+  work_list.push_back(class_loader_chain_.get());
+  while (!work_list.empty()) {
+    ClassLoaderInfo* info = work_list.back();
+    work_list.pop_back();
+    size_t opened_dex_files_index = info->opened_dex_files.size();
+    for (const std::string& cp_elem : info->classpath) {
       // If path is relative, append it to the provided base directory.
       std::string location = cp_elem;
       if (location[0] != '/' && !classpath_dir.empty()) {
@@ -220,9 +405,9 @@
       if (!dex_file_loader.Open(location.c_str(),
                                 location.c_str(),
                                 Runtime::Current()->IsVerificationEnabled(),
-                                /*verify_checksum*/ true,
+                                /*verify_checksum=*/ true,
                                 &error_msg,
-                                &info.opened_dex_files)) {
+                                &info->opened_dex_files)) {
         // If we fail to open the dex file because it's been stripped, try to open the dex file
         // from its corresponding oat file.
         // This could happen when we need to recompile a pre-build whose dex code has been stripped.
@@ -234,10 +419,10 @@
         std::vector<std::unique_ptr<const DexFile>> oat_dex_files;
         if (oat_file != nullptr &&
             OatFileAssistant::LoadDexFiles(*oat_file, location, &oat_dex_files)) {
-          info.opened_oat_files.push_back(std::move(oat_file));
-          info.opened_dex_files.insert(info.opened_dex_files.end(),
-                                       std::make_move_iterator(oat_dex_files.begin()),
-                                       std::make_move_iterator(oat_dex_files.end()));
+          info->opened_oat_files.push_back(std::move(oat_file));
+          info->opened_dex_files.insert(info->opened_dex_files.end(),
+                                        std::make_move_iterator(oat_dex_files.begin()),
+                                        std::make_move_iterator(oat_dex_files.end()));
         } else {
           LOG(WARNING) << "Could not open dex files from location: " << location;
           dex_files_open_result_ = false;
@@ -254,14 +439,15 @@
     // This will allow the context to VerifyClassLoaderContextMatch which expects or multidex
     // location in the class paths.
     // Note that this will also remove the paths that could not be opened.
-    info.original_classpath = std::move(info.classpath);
-    info.classpath.clear();
-    info.checksums.clear();
-    for (size_t k = opened_dex_files_index; k < info.opened_dex_files.size(); k++) {
-      std::unique_ptr<const DexFile>& dex = info.opened_dex_files[k];
-      info.classpath.push_back(dex->GetLocation());
-      info.checksums.push_back(dex->GetLocationChecksum());
+    info->original_classpath = std::move(info->classpath);
+    info->classpath.clear();
+    info->checksums.clear();
+    for (size_t k = opened_dex_files_index; k < info->opened_dex_files.size(); k++) {
+      std::unique_ptr<const DexFile>& dex = info->opened_dex_files[k];
+      info->classpath.push_back(dex->GetLocation());
+      info->checksums.push_back(dex->GetLocationChecksum());
     }
+    AddToWorkList(info, work_list);
   }
 
   return dex_files_open_result_;
@@ -272,35 +458,44 @@
   CHECK(!dex_files_open_attempted_)
       << "RemoveLocationsFromClasspaths cannot be call after OpenDexFiles";
 
+  if (class_loader_chain_ == nullptr) {
+    return false;
+  }
+
   std::set<std::string> canonical_locations;
   for (const std::string& location : locations) {
     canonical_locations.insert(DexFileLoader::GetDexCanonicalLocation(location.c_str()));
   }
   bool removed_locations = false;
-  for (ClassLoaderInfo& info : class_loader_chain_) {
-    size_t initial_size = info.classpath.size();
+  std::vector<ClassLoaderInfo*> work_list;
+  work_list.push_back(class_loader_chain_.get());
+  while (!work_list.empty()) {
+    ClassLoaderInfo* info = work_list.back();
+    work_list.pop_back();
+    size_t initial_size = info->classpath.size();
     auto kept_it = std::remove_if(
-        info.classpath.begin(),
-        info.classpath.end(),
+        info->classpath.begin(),
+        info->classpath.end(),
         [canonical_locations](const std::string& location) {
             return ContainsElement(canonical_locations,
                                    DexFileLoader::GetDexCanonicalLocation(location.c_str()));
         });
-    info.classpath.erase(kept_it, info.classpath.end());
-    if (initial_size != info.classpath.size()) {
+    info->classpath.erase(kept_it, info->classpath.end());
+    if (initial_size != info->classpath.size()) {
       removed_locations = true;
     }
+    AddToWorkList(info, work_list);
   }
   return removed_locations;
 }
 
 std::string ClassLoaderContext::EncodeContextForDex2oat(const std::string& base_dir) const {
-  return EncodeContext(base_dir, /*for_dex2oat*/ true, /*stored_context*/ nullptr);
+  return EncodeContext(base_dir, /*for_dex2oat=*/ true, /*stored_context=*/ nullptr);
 }
 
 std::string ClassLoaderContext::EncodeContextForOatFile(const std::string& base_dir,
                                                         ClassLoaderContext* stored_context) const {
-  return EncodeContext(base_dir, /*for_dex2oat*/ false, stored_context);
+  return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context);
 }
 
 std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
@@ -312,11 +507,11 @@
   }
 
   if (stored_context != nullptr) {
-    DCHECK_EQ(class_loader_chain_.size(), stored_context->class_loader_chain_.size());
+    DCHECK_EQ(GetParentChainSize(), stored_context->GetParentChainSize());
   }
 
   std::ostringstream out;
-  if (class_loader_chain_.empty()) {
+  if (class_loader_chain_ == nullptr) {
     // We can get in this situation if the context was created with a class path containing the
     // source dex files which were later removed (happens during run-tests).
     out << GetClassLoaderTypeName(kPathClassLoader)
@@ -325,62 +520,190 @@
     return out.str();
   }
 
-  for (size_t i = 0; i < class_loader_chain_.size(); i++) {
-    const ClassLoaderInfo& info = class_loader_chain_[i];
-    if (i > 0) {
-      out << kClassLoaderSeparator;
-    }
-    out << GetClassLoaderTypeName(info.type);
-    out << kClassLoaderOpeningMark;
-    std::set<std::string> seen_locations;
-    SafeMap<std::string, std::string> remap;
-    if (stored_context != nullptr) {
-      DCHECK_EQ(info.original_classpath.size(),
-                stored_context->class_loader_chain_[i].classpath.size());
-      for (size_t k = 0; k < info.original_classpath.size(); ++k) {
-        // Note that we don't care if the same name appears twice.
-        remap.Put(info.original_classpath[k], stored_context->class_loader_chain_[i].classpath[k]);
-      }
-    }
-    for (size_t k = 0; k < info.opened_dex_files.size(); k++) {
-      const std::unique_ptr<const DexFile>& dex_file = info.opened_dex_files[k];
-      if (for_dex2oat) {
-        // dex2oat only needs the base location. It cannot accept multidex locations.
-        // So ensure we only add each file once.
-        bool new_insert = seen_locations.insert(
-            DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second;
-        if (!new_insert) {
-          continue;
-        }
-      }
-      std::string location = dex_file->GetLocation();
-      // If there is a stored class loader remap, fix up the multidex strings.
-      if (!remap.empty()) {
-        std::string base_dex_location = DexFileLoader::GetBaseLocation(location);
-        auto it = remap.find(base_dex_location);
-        CHECK(it != remap.end()) << base_dex_location;
-        location = it->second + DexFileLoader::GetMultiDexSuffix(location);
-      }
-      if (k > 0) {
-        out << kClasspathSeparator;
-      }
-      // Find paths that were relative and convert them back from absolute.
-      if (!base_dir.empty() && location.substr(0, base_dir.length()) == base_dir) {
-        out << location.substr(base_dir.length() + 1).c_str();
-      } else {
-        out << location.c_str();
-      }
-      // dex2oat does not need the checksums.
-      if (!for_dex2oat) {
-        out << kDexFileChecksumSeparator;
-        out << dex_file->GetLocationChecksum();
-      }
-    }
-    out << kClassLoaderClosingMark;
-  }
+  EncodeContextInternal(
+      *class_loader_chain_,
+      base_dir,
+      for_dex2oat,
+      (stored_context == nullptr ? nullptr : stored_context->class_loader_chain_.get()),
+      out);
   return out.str();
 }
 
+void ClassLoaderContext::EncodeContextInternal(const ClassLoaderInfo& info,
+                                               const std::string& base_dir,
+                                               bool for_dex2oat,
+                                               ClassLoaderInfo* stored_info,
+                                               std::ostringstream& out) const {
+  out << GetClassLoaderTypeName(info.type);
+  out << kClassLoaderOpeningMark;
+  std::set<std::string> seen_locations;
+  SafeMap<std::string, std::string> remap;
+  if (stored_info != nullptr) {
+    for (size_t k = 0; k < info.original_classpath.size(); ++k) {
+      // Note that we don't care if the same name appears twice.
+      remap.Put(info.original_classpath[k], stored_info->classpath[k]);
+    }
+  }
+  for (size_t k = 0; k < info.opened_dex_files.size(); k++) {
+    const std::unique_ptr<const DexFile>& dex_file = info.opened_dex_files[k];
+    if (for_dex2oat) {
+      // dex2oat only needs the base location. It cannot accept multidex locations.
+      // So ensure we only add each file once.
+      bool new_insert = seen_locations.insert(
+          DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second;
+      if (!new_insert) {
+        continue;
+      }
+    }
+    std::string location = dex_file->GetLocation();
+    // If there is a stored class loader remap, fix up the multidex strings.
+    if (!remap.empty()) {
+      std::string base_dex_location = DexFileLoader::GetBaseLocation(location);
+      auto it = remap.find(base_dex_location);
+      CHECK(it != remap.end()) << base_dex_location;
+      location = it->second + DexFileLoader::GetMultiDexSuffix(location);
+    }
+    if (k > 0) {
+      out << kClasspathSeparator;
+    }
+    // Find paths that were relative and convert them back from absolute.
+    if (!base_dir.empty() && location.substr(0, base_dir.length()) == base_dir) {
+      out << location.substr(base_dir.length() + 1).c_str();
+    } else {
+      out << location.c_str();
+    }
+    // dex2oat does not need the checksums.
+    if (!for_dex2oat) {
+      out << kDexFileChecksumSeparator;
+      out << dex_file->GetLocationChecksum();
+    }
+  }
+  out << kClassLoaderClosingMark;
+
+  if (!info.shared_libraries.empty()) {
+    out << kClassLoaderSharedLibraryOpeningMark;
+    for (uint32_t i = 0; i < info.shared_libraries.size(); ++i) {
+      if (i > 0) {
+        out << kClassLoaderSharedLibrarySeparator;
+      }
+      EncodeContextInternal(
+          *info.shared_libraries[i].get(),
+          base_dir,
+          for_dex2oat,
+          (stored_info == nullptr ? nullptr : stored_info->shared_libraries[i].get()),
+          out);
+    }
+    out << kClassLoaderSharedLibraryClosingMark;
+  }
+  if (info.parent != nullptr) {
+    out << kClassLoaderSeparator;
+    EncodeContextInternal(
+        *info.parent.get(),
+        base_dir,
+        for_dex2oat,
+        (stored_info == nullptr ? nullptr : stored_info->parent.get()),
+        out);
+  }
+}
+
+// Returns the WellKnownClass for the given class loader type.
+static jclass GetClassLoaderClass(ClassLoaderContext::ClassLoaderType type) {
+  switch (type) {
+    case ClassLoaderContext::kPathClassLoader:
+      return WellKnownClasses::dalvik_system_PathClassLoader;
+    case ClassLoaderContext::kDelegateLastClassLoader:
+      return WellKnownClasses::dalvik_system_DelegateLastClassLoader;
+    case ClassLoaderContext::kInvalidClassLoader: break;  // will fail after the switch.
+  }
+  LOG(FATAL) << "Invalid class loader type " << type;
+  UNREACHABLE();
+}
+
+static std::string FlattenClasspath(const std::vector<std::string>& classpath) {
+  return android::base::Join(classpath, ':');
+}
+
+static ObjPtr<mirror::ClassLoader> CreateClassLoaderInternal(
+    Thread* self,
+    ScopedObjectAccess& soa,
+    const ClassLoaderContext::ClassLoaderInfo& info,
+    bool for_shared_library,
+    VariableSizedHandleScope& map_scope,
+    std::map<std::string, Handle<mirror::ClassLoader>>& canonicalized_libraries,
+    bool add_compilation_sources,
+    const std::vector<const DexFile*>& compilation_sources)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (for_shared_library) {
+    // Check if the shared library has already been created.
+    auto search = canonicalized_libraries.find(FlattenClasspath(info.classpath));
+    if (search != canonicalized_libraries.end()) {
+      return search->second.Get();
+    }
+  }
+
+  StackHandleScope<3> hs(self);
+  MutableHandle<mirror::ObjectArray<mirror::ClassLoader>> libraries(
+      hs.NewHandle<mirror::ObjectArray<mirror::ClassLoader>>(nullptr));
+
+  if (!info.shared_libraries.empty()) {
+    libraries.Assign(mirror::ObjectArray<mirror::ClassLoader>::Alloc(
+        self,
+        GetClassRoot<mirror::ObjectArray<mirror::ClassLoader>>(),
+        info.shared_libraries.size()));
+    for (uint32_t i = 0; i < info.shared_libraries.size(); ++i) {
+      // We should only add the compilation sources to the first class loader.
+      libraries->Set(i,
+                     CreateClassLoaderInternal(
+                         self,
+                         soa,
+                         *info.shared_libraries[i].get(),
+                         /* for_shared_library= */ true,
+                         map_scope,
+                         canonicalized_libraries,
+                         /* add_compilation_sources= */ false,
+                         compilation_sources));
+    }
+  }
+
+  MutableHandle<mirror::ClassLoader> parent = hs.NewHandle<mirror::ClassLoader>(nullptr);
+  if (info.parent != nullptr) {
+    // We should only add the compilation sources to the first class loader.
+    parent.Assign(CreateClassLoaderInternal(
+        self,
+        soa,
+        *info.parent.get(),
+        /* for_shared_library= */ false,
+        map_scope,
+        canonicalized_libraries,
+        /* add_compilation_sources= */ false,
+        compilation_sources));
+  }
+  std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(
+      info.opened_dex_files);
+  if (add_compilation_sources) {
+    // For the first class loader, its classpath comes first, followed by compilation sources.
+    // This ensures that whenever we need to resolve classes from it the classpath elements
+    // come first.
+    class_path_files.insert(class_path_files.end(),
+                            compilation_sources.begin(),
+                            compilation_sources.end());
+  }
+  Handle<mirror::Class> loader_class = hs.NewHandle<mirror::Class>(
+      soa.Decode<mirror::Class>(GetClassLoaderClass(info.type)));
+  ObjPtr<mirror::ClassLoader> loader =
+      Runtime::Current()->GetClassLinker()->CreateWellKnownClassLoader(
+          self,
+          class_path_files,
+          loader_class,
+          parent,
+          libraries);
+  if (for_shared_library) {
+    canonicalized_libraries[FlattenClasspath(info.classpath)] =
+        map_scope.NewHandle<mirror::ClassLoader>(loader);
+  }
+  return loader;
+}
+
 jobject ClassLoaderContext::CreateClassLoader(
     const std::vector<const DexFile*>& compilation_sources) const {
   CheckDexFilesOpened("CreateClassLoader");
@@ -390,49 +713,48 @@
 
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
 
-  if (class_loader_chain_.empty()) {
+  if (class_loader_chain_ == nullptr) {
+    CHECK(special_shared_library_);
     return class_linker->CreatePathClassLoader(self, compilation_sources);
   }
 
-  // Create the class loaders starting from the top most parent (the one on the last position
-  // in the chain) but omit the first class loader which will contain the compilation_sources and
-  // needs special handling.
-  jobject current_parent = nullptr;  // the starting parent is the BootClassLoader.
-  for (size_t i = class_loader_chain_.size() - 1; i > 0; i--) {
-    std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(
-        class_loader_chain_[i].opened_dex_files);
-    current_parent = class_linker->CreateWellKnownClassLoader(
-        self,
-        class_path_files,
-        GetClassLoaderClass(class_loader_chain_[i].type),
-        current_parent);
-  }
+  // Create a map of canonicalized shared libraries. As we're holding objects,
+  // we're creating a variable size handle scope to put handles in the map.
+  VariableSizedHandleScope map_scope(self);
+  std::map<std::string, Handle<mirror::ClassLoader>> canonicalized_libraries;
 
-  // We set up all the parents. Move on to create the first class loader.
-  // Its classpath comes first, followed by compilation sources. This ensures that whenever
-  // we need to resolve classes from it the classpath elements come first.
-
-  std::vector<const DexFile*> first_class_loader_classpath = MakeNonOwningPointerVector(
-      class_loader_chain_[0].opened_dex_files);
-  first_class_loader_classpath.insert(first_class_loader_classpath.end(),
-                                    compilation_sources.begin(),
-                                    compilation_sources.end());
-
-  return class_linker->CreateWellKnownClassLoader(
-      self,
-      first_class_loader_classpath,
-      GetClassLoaderClass(class_loader_chain_[0].type),
-      current_parent);
+  // Create the class loader.
+  ObjPtr<mirror::ClassLoader> loader =
+      CreateClassLoaderInternal(self,
+                                soa,
+                                *class_loader_chain_.get(),
+                                /* for_shared_library= */ false,
+                                map_scope,
+                                canonicalized_libraries,
+                                /* add_compilation_sources= */ true,
+                                compilation_sources);
+  // Make it a global ref and return.
+  ScopedLocalRef<jobject> local_ref(
+      soa.Env(), soa.Env()->AddLocalReference<jobject>(loader));
+  return soa.Env()->NewGlobalRef(local_ref.get());
 }
 
 std::vector<const DexFile*> ClassLoaderContext::FlattenOpenedDexFiles() const {
   CheckDexFilesOpened("FlattenOpenedDexFiles");
 
   std::vector<const DexFile*> result;
-  for (const ClassLoaderInfo& info : class_loader_chain_) {
-    for (const std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
+  if (class_loader_chain_ == nullptr) {
+    return result;
+  }
+  std::vector<ClassLoaderInfo*> work_list;
+  work_list.push_back(class_loader_chain_.get());
+  while (!work_list.empty()) {
+    ClassLoaderInfo* info = work_list.back();
+    work_list.pop_back();
+    for (const std::unique_ptr<const DexFile>& dex_file : info->opened_dex_files) {
       result.push_back(dex_file.get());
     }
+    AddToWorkList(info, work_list);
   }
   return result;
 }
@@ -472,8 +794,8 @@
   int32_t long_array_size = long_array->GetLength();
   // Index 0 from the long array stores the oat file. The dex files start at index 1.
   for (int32_t j = 1; j < long_array_size; ++j) {
-    const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
-        long_array->GetWithoutChecks(j)));
+    const DexFile* cp_dex_file =
+        reinterpret_cast64<const DexFile*>(long_array->GetWithoutChecks(j));
     if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
       // TODO(calin): It's unclear why the dex files with no classes are skipped here and when
       // cp_dex_file can be null.
@@ -592,10 +914,12 @@
 // the classpath.
 // This method is recursive (w.r.t. the class loader parent) and will stop once it reaches the
 // BootClassLoader. Note that the class loader chain is expected to be short.
-bool ClassLoaderContext::AddInfoToContextFromClassLoader(
+bool ClassLoaderContext::CreateInfoFromClassLoader(
       ScopedObjectAccessAlreadyRunnable& soa,
       Handle<mirror::ClassLoader> class_loader,
-      Handle<mirror::ObjectArray<mirror::Object>> dex_elements)
+      Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
+      ClassLoaderInfo* child_info,
+      bool is_shared_library)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (ClassLinker::IsBootClassLoader(soa, class_loader.Get())) {
     // Nothing to do for the boot class loader as we don't add its dex files to the context.
@@ -629,23 +953,53 @@
     GetDexFilesFromDexElementsArray(soa, dex_elements, &dex_files_loaded);
   }
 
-  class_loader_chain_.push_back(ClassLoaderContext::ClassLoaderInfo(type));
-  ClassLoaderInfo& info = class_loader_chain_.back();
-  for (const DexFile* dex_file : dex_files_loaded) {
-    info.classpath.push_back(dex_file->GetLocation());
-    info.checksums.push_back(dex_file->GetLocationChecksum());
-    info.opened_dex_files.emplace_back(dex_file);
+  ClassLoaderInfo* info = new ClassLoaderContext::ClassLoaderInfo(type);
+  // Attach the `ClassLoaderInfo` now, before populating dex files, as only the
+  // `ClassLoaderContext` knows whether these dex files should be deleted or not.
+  if (child_info == nullptr) {
+    class_loader_chain_.reset(info);
+  } else if (is_shared_library) {
+    child_info->shared_libraries.push_back(std::unique_ptr<ClassLoaderInfo>(info));
+  } else {
+    child_info->parent.reset(info);
   }
 
-  // We created the ClassLoaderInfo for the current loader. Move on to its parent.
-
-  StackHandleScope<1> hs(Thread::Current());
-  Handle<mirror::ClassLoader> parent = hs.NewHandle(class_loader->GetParent());
+  // Now that `info` is in the chain, populate dex files.
+  for (const DexFile* dex_file : dex_files_loaded) {
+    info->classpath.push_back(dex_file->GetLocation());
+    info->checksums.push_back(dex_file->GetLocationChecksum());
+    info->opened_dex_files.emplace_back(dex_file);
+  }
 
   // Note that dex_elements array is null here. The elements are considered to be part of the
   // current class loader and are not passed to the parents.
   ScopedNullHandle<mirror::ObjectArray<mirror::Object>> null_dex_elements;
-  return AddInfoToContextFromClassLoader(soa, parent, null_dex_elements);
+
+  // Add the shared libraries.
+  StackHandleScope<3> hs(Thread::Current());
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader.Get());
+  if (raw_shared_libraries != nullptr) {
+    Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries =
+        hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>());
+    MutableHandle<mirror::ClassLoader> temp_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
+    for (int32_t i = 0; i < shared_libraries->GetLength(); ++i) {
+      temp_loader.Assign(shared_libraries->Get(i));
+      if (!CreateInfoFromClassLoader(
+              soa, temp_loader, null_dex_elements, info, /*is_shared_library=*/ true)) {
+        return false;
+      }
+    }
+  }
+
+  // We created the ClassLoaderInfo for the current loader. Move on to its parent.
+  Handle<mirror::ClassLoader> parent = hs.NewHandle(class_loader->GetParent());
+  if (!CreateInfoFromClassLoader(
+          soa, parent, null_dex_elements, info, /*is_shared_library=*/ false)) {
+    return false;
+  }
+  return true;
 }
 
 std::unique_ptr<ClassLoaderContext> ClassLoaderContext::CreateContextForClassLoader(
@@ -659,13 +1013,12 @@
       hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader));
   Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
       hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
-
-  std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false));
-  if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) {
-    return result;
-  } else {
+  std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files=*/ false));
+  if (!result->CreateInfoFromClassLoader(
+          soa, h_class_loader, h_dex_elements, nullptr, /*is_shared_library=*/ false)) {
     return nullptr;
   }
+  return result;
 }
 
 static bool IsAbsoluteLocation(const std::string& location) {
@@ -693,7 +1046,9 @@
   // collision check.
   if (expected_context.special_shared_library_) {
     // Special case where we are the only entry in the class path.
-    if (class_loader_chain_.size() == 1 && class_loader_chain_[0].classpath.size() == 0) {
+    if (class_loader_chain_ != nullptr &&
+        class_loader_chain_->parent == nullptr &&
+        class_loader_chain_->classpath.size() == 0) {
       return VerificationResult::kVerifies;
     }
     return VerificationResult::kForcedToSkipChecks;
@@ -701,41 +1056,43 @@
     return VerificationResult::kForcedToSkipChecks;
   }
 
-  if (expected_context.class_loader_chain_.size() != class_loader_chain_.size()) {
-    LOG(WARNING) << "ClassLoaderContext size mismatch. expected="
-        << expected_context.class_loader_chain_.size()
-        << ", actual=" << class_loader_chain_.size()
-        << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+  ClassLoaderInfo* info = class_loader_chain_.get();
+  ClassLoaderInfo* expected = expected_context.class_loader_chain_.get();
+  CHECK(info != nullptr);
+  CHECK(expected != nullptr);
+  if (!ClassLoaderInfoMatch(*info, *expected, context_spec, verify_names, verify_checksums)) {
     return VerificationResult::kMismatch;
   }
+  return VerificationResult::kVerifies;
+}
 
-  for (size_t i = 0; i < class_loader_chain_.size(); i++) {
-    const ClassLoaderInfo& info = class_loader_chain_[i];
-    const ClassLoaderInfo& expected_info = expected_context.class_loader_chain_[i];
-    if (info.type != expected_info.type) {
-      LOG(WARNING) << "ClassLoaderContext type mismatch for position " << i
-          << ". expected=" << GetClassLoaderTypeName(expected_info.type)
-          << ", found=" << GetClassLoaderTypeName(info.type)
+bool ClassLoaderContext::ClassLoaderInfoMatch(
+    const ClassLoaderInfo& info,
+    const ClassLoaderInfo& expected_info,
+    const std::string& context_spec,
+    bool verify_names,
+    bool verify_checksums) const {
+  if (info.type != expected_info.type) {
+    LOG(WARNING) << "ClassLoaderContext type mismatch"
+        << ". expected=" << GetClassLoaderTypeName(expected_info.type)
+        << ", found=" << GetClassLoaderTypeName(info.type)
+        << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+    return false;
+  }
+  if (info.classpath.size() != expected_info.classpath.size()) {
+    LOG(WARNING) << "ClassLoaderContext classpath size mismatch"
+          << ". expected=" << expected_info.classpath.size()
+          << ", found=" << info.classpath.size()
           << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
-      return VerificationResult::kMismatch;
-    }
-    if (info.classpath.size() != expected_info.classpath.size()) {
-      LOG(WARNING) << "ClassLoaderContext classpath size mismatch for position " << i
-            << ". expected=" << expected_info.classpath.size()
-            << ", found=" << info.classpath.size()
-            << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
-      return VerificationResult::kMismatch;
-    }
+    return false;
+  }
 
-    if (verify_checksums) {
-      DCHECK_EQ(info.classpath.size(), info.checksums.size());
-      DCHECK_EQ(expected_info.classpath.size(), expected_info.checksums.size());
-    }
+  if (verify_checksums) {
+    DCHECK_EQ(info.classpath.size(), info.checksums.size());
+    DCHECK_EQ(expected_info.classpath.size(), expected_info.checksums.size());
+  }
 
-    if (!verify_names) {
-      continue;
-    }
-
+  if (verify_names) {
     for (size_t k = 0; k < info.classpath.size(); k++) {
       // Compute the dex location that must be compared.
       // We shouldn't do a naive comparison `info.classpath[k] == expected_info.classpath[k]`
@@ -775,34 +1132,58 @@
 
       // Compare the locations.
       if (dex_name != expected_dex_name) {
-        LOG(WARNING) << "ClassLoaderContext classpath element mismatch for position " << i
+        LOG(WARNING) << "ClassLoaderContext classpath element mismatch"
             << ". expected=" << expected_info.classpath[k]
             << ", found=" << info.classpath[k]
             << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
-        return VerificationResult::kMismatch;
+        return false;
       }
 
       // Compare the checksums.
       if (info.checksums[k] != expected_info.checksums[k]) {
-        LOG(WARNING) << "ClassLoaderContext classpath element checksum mismatch for position " << i
+        LOG(WARNING) << "ClassLoaderContext classpath element checksum mismatch"
                      << ". expected=" << expected_info.checksums[k]
                      << ", found=" << info.checksums[k]
                      << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
-        return VerificationResult::kMismatch;
+        return false;
       }
     }
   }
-  return VerificationResult::kVerifies;
-}
 
-jclass ClassLoaderContext::GetClassLoaderClass(ClassLoaderType type) {
-  switch (type) {
-    case kPathClassLoader: return WellKnownClasses::dalvik_system_PathClassLoader;
-    case kDelegateLastClassLoader: return WellKnownClasses::dalvik_system_DelegateLastClassLoader;
-    case kInvalidClassLoader: break;  // will fail after the switch.
+  if (info.shared_libraries.size() != expected_info.shared_libraries.size()) {
+    LOG(WARNING) << "ClassLoaderContext shared library size mismatch. "
+          << "Expected=" << expected_info.shared_libraries.size()
+          << ", found=" << info.shared_libraries.size()
+          << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+    return false;
   }
-  LOG(FATAL) << "Invalid class loader type " << type;
-  UNREACHABLE();
+  for (size_t i = 0; i < info.shared_libraries.size(); ++i) {
+    if (!ClassLoaderInfoMatch(*info.shared_libraries[i].get(),
+                              *expected_info.shared_libraries[i].get(),
+                              context_spec,
+                              verify_names,
+                              verify_checksums)) {
+      return false;
+    }
+  }
+  if (info.parent.get() == nullptr) {
+    if (expected_info.parent.get() != nullptr) {
+      LOG(WARNING) << "ClassLoaderContext parent mismatch. "
+            << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+      return false;
+    }
+    return true;
+  } else if (expected_info.parent.get() == nullptr) {
+    LOG(WARNING) << "ClassLoaderContext parent mismatch. "
+          << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+    return false;
+  } else {
+    return ClassLoaderInfoMatch(*info.parent.get(),
+                                *expected_info.parent.get(),
+                                context_spec,
+                                verify_names,
+                                verify_checksums);
+  }
 }
 
 }  // namespace art
diff --git a/runtime/class_loader_context.h b/runtime/class_loader_context.h
index a4268aa..5a89c4e 100644
--- a/runtime/class_loader_context.h
+++ b/runtime/class_loader_context.h
@@ -89,7 +89,10 @@
   // If the context is empty, this method only creates a single PathClassLoader with the
   // given compilation_sources.
   //
-  // Notes:
+  // Shared libraries found in the chain will be canonicalized based on the dex files they
+  // contain.
+  //
+  // Implementation notes:
   //   1) the objects are not completely set up. Do not use this outside of tests and the compiler.
   //   2) should only be called before the first call to OpenDexFiles().
   jobject CreateClassLoader(const std::vector<const DexFile*>& compilation_sources) const;
@@ -154,10 +157,11 @@
   // This will return a context with a single and empty PathClassLoader.
   static std::unique_ptr<ClassLoaderContext> Default();
 
- private:
   struct ClassLoaderInfo {
     // The type of this class loader.
     ClassLoaderType type;
+    // Shared libraries this context has.
+    std::vector<std::unique_ptr<ClassLoaderInfo>> shared_libraries;
     // The list of class path elements that this loader loads.
     // Note that this list may contain relative paths.
     std::vector<std::string> classpath;
@@ -171,13 +175,35 @@
     // After OpenDexFiles, in case some of the dex files were opened from their oat files
     // this holds the list of opened oat files.
     std::vector<std::unique_ptr<OatFile>> opened_oat_files;
+    // The parent class loader.
+    std::unique_ptr<ClassLoaderInfo> parent;
 
     explicit ClassLoaderInfo(ClassLoaderType cl_type) : type(cl_type) {}
   };
 
+ private:
   // Creates an empty context (with no class loaders).
   ClassLoaderContext();
 
+  // Get the parent of the class loader chain at depth `index`.
+  ClassLoaderInfo* GetParent(size_t index) const {
+    ClassLoaderInfo* result = class_loader_chain_.get();
+    while ((result != nullptr) && (index-- != 0)) {
+      result = result->parent.get();
+    }
+    return result;
+  }
+
+  size_t GetParentChainSize() const {
+    size_t result = 0;
+    ClassLoaderInfo* info = class_loader_chain_.get();
+    while (info != nullptr) {
+      ++result;
+      info = info->parent.get();
+    }
+    return result;
+  }
+
   // Constructs an empty context.
   // `owns_the_dex_files` specifies whether or not the context will own the opened dex files
   // present in the class loader chain. If `owns_the_dex_files` is true then OpenDexFiles cannot
@@ -188,25 +214,27 @@
   // Reads the class loader spec in place and returns true if the spec is valid and the
   // compilation context was constructed.
   bool Parse(const std::string& spec, bool parse_checksums = false);
+  ClassLoaderInfo* ParseInternal(const std::string& spec, bool parse_checksums);
 
-  // Attempts to parse a single class loader spec for the given class_loader_type.
-  // If successful the class loader spec will be added to the chain.
-  // Returns whether or not the operation was successful.
-  bool ParseClassLoaderSpec(const std::string& class_loader_spec,
-                            ClassLoaderType class_loader_type,
-                            bool parse_checksums = false);
+  // Attempts to parse a single class loader spec.
+  // Returns the ClassLoaderInfo abstraction for this spec, or null if it cannot be parsed.
+  std::unique_ptr<ClassLoaderInfo> ParseClassLoaderSpec(
+      const std::string& class_loader_spec,
+      bool parse_checksums = false);
 
   // CHECKs that the dex files were opened (OpenDexFiles was called and set dex_files_open_result_
   // to true). Aborts if not. The `calling_method` is used in the log message to identify the source
   // of the call.
   void CheckDexFilesOpened(const std::string& calling_method) const;
 
-  // Adds the `class_loader` info to the context.
+  // Creates the `ClassLoaderInfo` representing`class_loader` and attach it to `this`.
   // The dex file present in `dex_elements` array (if not null) will be added at the end of
   // the classpath.
-  bool AddInfoToContextFromClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                       Handle<mirror::ClassLoader> class_loader,
-                                       Handle<mirror::ObjectArray<mirror::Object>> dex_elements)
+  bool CreateInfoFromClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                 Handle<mirror::ClassLoader> class_loader,
+                                 Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
+                                 ClassLoaderInfo* child_info,
+                                 bool is_shared_library)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Encodes the context as a string suitable to be passed to dex2oat or to be added to the
@@ -219,6 +247,20 @@
                             bool for_dex2oat,
                             ClassLoaderContext* stored_context) const;
 
+  // Internal version of `EncodeContext`, which will be called recursively
+  // on the parent and shared libraries.
+  void EncodeContextInternal(const ClassLoaderInfo& info,
+                             const std::string& base_dir,
+                             bool for_dex2oat,
+                             ClassLoaderInfo* stored_info,
+                             std::ostringstream& out) const;
+
+  bool ClassLoaderInfoMatch(const ClassLoaderInfo& info,
+                            const ClassLoaderInfo& expected_info,
+                            const std::string& context_spec,
+                            bool verify_names,
+                            bool verify_checksums) const;
+
   // Extracts the class loader type from the given spec.
   // Return ClassLoaderContext::kInvalidClassLoader if the class loader type is not
   // recognized.
@@ -228,13 +270,8 @@
   // The returned format can be used when parsing a context spec.
   static const char* GetClassLoaderTypeName(ClassLoaderType type);
 
-  // Returns the WellKnownClass for the given class loader type.
-  static jclass GetClassLoaderClass(ClassLoaderType type);
-
-  // The class loader chain represented as a vector.
-  // The parent of class_loader_chain_[i] is class_loader_chain_[i++].
-  // The parent of the last element is assumed to be the boot class loader.
-  std::vector<ClassLoaderInfo> class_loader_chain_;
+  // The class loader chain.
+  std::unique_ptr<ClassLoaderInfo> class_loader_chain_;
 
   // Whether or not the class loader context should be ignored at runtime when loading the oat
   // files. When true, dex2oat will use OatFile::kSpecialSharedLibrary as the classpath key in
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 5e3f48c..3c5f1ef 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -19,15 +19,19 @@
 #include <gtest/gtest.h>
 
 #include "android-base/strings.h"
+#include "art_field-inl.h"
 #include "base/dchecked_vector.h"
 #include "base/stl_util.h"
 #include "class_linker.h"
+#include "class_root.h"
 #include "common_runtime_test.h"
 #include "dex/dex_file.h"
 #include "handle_scope-inl.h"
+#include "jni/jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "oat_file_assistant.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
@@ -40,7 +44,7 @@
  public:
   void VerifyContextSize(ClassLoaderContext* context, size_t expected_size) {
     ASSERT_TRUE(context != nullptr);
-    ASSERT_EQ(expected_size, context->class_loader_chain_.size());
+    ASSERT_EQ(expected_size, context->GetParentChainSize());
   }
 
   void VerifyClassLoaderPCL(ClassLoaderContext* context,
@@ -57,6 +61,33 @@
         context, index, ClassLoaderContext::kDelegateLastClassLoader, classpath);
   }
 
+  void VerifyClassLoaderSharedLibraryPCL(ClassLoaderContext* context,
+                                         size_t loader_index,
+                                         size_t shared_library_index,
+                                         const std::string& classpath) {
+    VerifyClassLoaderInfoSL(
+        context, loader_index, shared_library_index, ClassLoaderContext::kPathClassLoader,
+        classpath);
+  }
+
+  void VerifySharedLibrariesSize(ClassLoaderContext* context,
+                                 size_t loader_index,
+                                 size_t expected_size) {
+    ASSERT_TRUE(context != nullptr);
+    ASSERT_GT(context->GetParentChainSize(), loader_index);
+    const ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(loader_index);
+    ASSERT_EQ(info.shared_libraries.size(), expected_size);
+  }
+
+  void VerifyClassLoaderSharedLibraryDLC(ClassLoaderContext* context,
+                                         size_t loader_index,
+                                         size_t shared_library_index,
+                                         const std::string& classpath) {
+    VerifyClassLoaderInfoSL(
+        context, loader_index, shared_library_index, ClassLoaderContext::kDelegateLastClassLoader,
+        classpath);
+  }
+
   void VerifyClassLoaderPCLFromTestDex(ClassLoaderContext* context,
                                        size_t index,
                                        const std::string& test_name) {
@@ -91,7 +122,7 @@
     ASSERT_TRUE(context != nullptr);
     ASSERT_TRUE(context->dex_files_open_attempted_);
     ASSERT_TRUE(context->dex_files_open_result_);
-    ClassLoaderContext::ClassLoaderInfo& info = context->class_loader_chain_[index];
+    ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(index);
     ASSERT_EQ(all_dex_files->size(), info.classpath.size());
     ASSERT_EQ(all_dex_files->size(), info.opened_dex_files.size());
     size_t cur_open_dex_index = 0;
@@ -125,7 +156,7 @@
 
   std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) {
     std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext());
-    if (!context->Parse(context_spec, /*parse_checksums*/ true)) {
+    if (!context->Parse(context_spec, /*parse_checksums=*/ true)) {
       return nullptr;
     }
     return context;
@@ -168,14 +199,31 @@
                              ClassLoaderContext::ClassLoaderType type,
                              const std::string& classpath) {
     ASSERT_TRUE(context != nullptr);
-    ASSERT_GT(context->class_loader_chain_.size(), index);
-    ClassLoaderContext::ClassLoaderInfo& info = context->class_loader_chain_[index];
+    ASSERT_GT(context->GetParentChainSize(), index);
+    ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(index);
     ASSERT_EQ(type, info.type);
     std::vector<std::string> expected_classpath;
     Split(classpath, ':', &expected_classpath);
     ASSERT_EQ(expected_classpath, info.classpath);
   }
 
+  void VerifyClassLoaderInfoSL(ClassLoaderContext* context,
+                               size_t loader_index,
+                               size_t shared_library_index,
+                               ClassLoaderContext::ClassLoaderType type,
+                               const std::string& classpath) {
+    ASSERT_TRUE(context != nullptr);
+    ASSERT_GT(context->GetParentChainSize(), loader_index);
+    const ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(loader_index);
+    ASSERT_GT(info.shared_libraries.size(), shared_library_index);
+    const ClassLoaderContext::ClassLoaderInfo& sl =
+        *info.shared_libraries[shared_library_index].get();
+    ASSERT_EQ(type, info.type);
+    std::vector<std::string> expected_classpath;
+    Split(classpath, ':', &expected_classpath);
+    ASSERT_EQ(expected_classpath, sl.classpath);
+  }
+
   void VerifyClassLoaderFromTestDex(ClassLoaderContext* context,
                                     size_t index,
                                     ClassLoaderContext::ClassLoaderType type,
@@ -223,6 +271,42 @@
   VerifyClassLoaderPCL(context.get(), 2, "e.dex");
 }
 
+TEST_F(ClassLoaderContextTest, ParseSharedLibraries) {
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+      "PCL[a.dex:b.dex]{PCL[s1.dex]#PCL[s2.dex:s3.dex]};DLC[c.dex:d.dex]{DLC[s4.dex]}");
+  VerifyContextSize(context.get(), 2);
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s1.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 1, "s2.dex:s3.dex");
+  VerifyClassLoaderDLC(context.get(), 1, "c.dex:d.dex");
+  VerifyClassLoaderSharedLibraryDLC(context.get(), 1, 0, "s4.dex");
+}
+
+TEST_F(ClassLoaderContextTest, ParseEnclosingSharedLibraries) {
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+      "PCL[a.dex:b.dex]{PCL[s1.dex]{PCL[s2.dex:s3.dex];PCL[s4.dex]}}");
+  VerifyContextSize(context.get(), 1);
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s1.dex");
+}
+
+TEST_F(ClassLoaderContextTest, ParseComplexSharedLibraries1) {
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+      "PCL[]{PCL[s4.dex]{PCL[s5.dex]{PCL[s6.dex]}#PCL[s6.dex]}}");
+  VerifyContextSize(context.get(), 1);
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s4.dex");
+}
+
+TEST_F(ClassLoaderContextTest, ParseComplexSharedLibraries2) {
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+      "PCL[]{PCL[s1.dex]{PCL[s2.dex]}#PCL[s2.dex]#"
+      "PCL[s3.dex]#PCL[s4.dex]{PCL[s5.dex]{PCL[s6.dex]}#PCL[s6.dex]}#PCL[s5.dex]{PCL[s6.dex]}}");
+  VerifyContextSize(context.get(), 1);
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s1.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 1, "s2.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 2, "s3.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 3, "s4.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 4, "s5.dex");
+}
+
 TEST_F(ClassLoaderContextTest, ParseValidEmptyContextDLC) {
   std::unique_ptr<ClassLoaderContext> context =
       ClassLoaderContext::Create("DLC[]");
@@ -230,6 +314,13 @@
   VerifyClassLoaderDLC(context.get(), 0, "");
 }
 
+TEST_F(ClassLoaderContextTest, ParseValidEmptyContextSharedLibrary) {
+  std::unique_ptr<ClassLoaderContext> context =
+      ClassLoaderContext::Create("DLC[]{}");
+  VerifyContextSize(context.get(), 1);
+  VerifySharedLibrariesSize(context.get(), 0, 0);
+}
+
 TEST_F(ClassLoaderContextTest, ParseValidContextSpecialSymbol) {
   std::unique_ptr<ClassLoaderContext> context =
     ClassLoaderContext::Create(OatFile::kSpecialSharedLibrary);
@@ -243,6 +334,15 @@
   ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCLa.dex]"));
   ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{a.dex}"));
   ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex];DLC[b.dex"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex]{ABC};DLC[b.dex"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex]{};DLC[b.dex"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC[s4.dex]}"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC[s4.dex]{"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC{DLC[s4.dex]}"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{##}"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{PCL[s4.dex]#}"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{PCL[s4.dex]##}"));
+  ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{PCL[s4.dex]{PCL[s3.dex]}#}"));
 }
 
 TEST_F(ClassLoaderContextTest, OpenInvalidDexFiles) {
@@ -263,7 +363,7 @@
           "PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
           "DLC[" + dex_name + "]");
 
-  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
 
   VerifyContextSize(context.get(), 2);
 
@@ -314,7 +414,7 @@
           "PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
           "DLC[" + dex_name + "]");
 
-  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
 
   std::vector<std::unique_ptr<const DexFile>> all_dex_files0 = OpenTestDexFiles("MultiDex");
   std::vector<std::unique_ptr<const DexFile>> myclass_dex_files = OpenTestDexFiles("MyClass");
@@ -532,6 +632,292 @@
       soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
 }
 
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithSharedLibraries) {
+  // Setup the context.
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_b = OpenTestDexFiles("ForClassLoaderB");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_c = OpenTestDexFiles("ForClassLoaderC");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_d = OpenTestDexFiles("ForClassLoaderD");
+
+  std::string context_spec =
+      "PCL[" + CreateClassPath(classpath_dex_a) + ":" + CreateClassPath(classpath_dex_b) + "]{" +
+      "DLC[" + CreateClassPath(classpath_dex_c) + "]#" +
+      "PCL[" + CreateClassPath(classpath_dex_d) + "]}";
+
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_spec);
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+  // Setup the compilation sources.
+  std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> compilation_sources_raw =
+      MakeNonOwningPointerVector(compilation_sources);
+
+  // Create the class loader.
+  jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+  ASSERT_TRUE(jclass_loader != nullptr);
+
+  // Verify the class loader.
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<4> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader_1 = hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> class_loader_1_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_a);
+  for (auto& dex : classpath_dex_b) {
+    class_loader_1_dex_files.push_back(dex.get());
+  }
+  for (auto& dex : compilation_sources_raw) {
+    class_loader_1_dex_files.push_back(dex);
+  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_1,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_1_dex_files);
+
+  // Verify the shared libraries.
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader_1.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries->GetLength(), 2);
+
+  // Verify the first shared library.
+  Handle<mirror::ClassLoader> class_loader_2 = hs.NewHandle(shared_libraries->Get(0));
+  std::vector<const DexFile*> class_loader_2_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_c);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_2,
+                            WellKnownClasses::dalvik_system_DelegateLastClassLoader,
+                            class_loader_2_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_2.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // Verify the second shared library.
+  Handle<mirror::ClassLoader> class_loader_3 = hs.NewHandle(shared_libraries->Get(1));
+  std::vector<const DexFile*> class_loader_3_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_d);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_3,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_3_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_3.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // All class loaders should have the BootClassLoader as a parent.
+  ASSERT_TRUE(class_loader_1->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_2->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_3->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
+
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithSharedLibrariesInParentToo) {
+  // Setup the context.
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_b = OpenTestDexFiles("ForClassLoaderB");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_c = OpenTestDexFiles("ForClassLoaderC");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_d = OpenTestDexFiles("ForClassLoaderD");
+
+  std::string context_spec =
+      "PCL[" + CreateClassPath(classpath_dex_a) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_b) + "]};" +
+      "PCL[" + CreateClassPath(classpath_dex_c) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_d) + "]}";
+
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_spec);
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+  // Setup the compilation sources.
+  std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> compilation_sources_raw =
+      MakeNonOwningPointerVector(compilation_sources);
+
+  // Create the class loader.
+  jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+  ASSERT_TRUE(jclass_loader != nullptr);
+
+  // Verify the class loader.
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<6> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader_1 = hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> class_loader_1_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_a);
+  for (auto& dex : compilation_sources_raw) {
+    class_loader_1_dex_files.push_back(dex);
+  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_1,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_1_dex_files);
+
+  // Verify its shared library.
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader_1.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries->GetLength(), 1);
+
+  Handle<mirror::ClassLoader> class_loader_2 = hs.NewHandle(shared_libraries->Get(0));
+  std::vector<const DexFile*> class_loader_2_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_b);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_2,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_2_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_2.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // Verify the parent.
+  Handle<mirror::ClassLoader> class_loader_3 = hs.NewHandle(class_loader_1->GetParent());
+  std::vector<const DexFile*> class_loader_3_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_c);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_3,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_3_dex_files);
+
+  // Verify its shared library.
+  raw_shared_libraries = field->GetObject(class_loader_3.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries_2(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries->GetLength(), 1);
+
+  Handle<mirror::ClassLoader> class_loader_4 = hs.NewHandle(shared_libraries_2->Get(0));
+  std::vector<const DexFile*> class_loader_4_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_d);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_4,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_4_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_4.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // Class loaders should have the BootClassLoader as a parent.
+  ASSERT_TRUE(class_loader_2->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_3->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_4->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
+
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithSharedLibrariesDependencies) {
+  // Setup the context.
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_b = OpenTestDexFiles("ForClassLoaderB");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_c = OpenTestDexFiles("ForClassLoaderC");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_d = OpenTestDexFiles("ForClassLoaderD");
+
+  std::string context_spec =
+      "PCL[" + CreateClassPath(classpath_dex_a) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_b) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_c) + "]}};" +
+      "PCL[" + CreateClassPath(classpath_dex_d) + "]";
+
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_spec);
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+  // Setup the compilation sources.
+  std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> compilation_sources_raw =
+      MakeNonOwningPointerVector(compilation_sources);
+
+  // Create the class loader.
+  jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+  ASSERT_TRUE(jclass_loader != nullptr);
+
+  // Verify the class loader.
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<6> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader_1 = hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> class_loader_1_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_a);
+  for (auto& dex : compilation_sources_raw) {
+    class_loader_1_dex_files.push_back(dex);
+  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_1,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_1_dex_files);
+
+  // Verify its shared library.
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader_1.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries->GetLength(), 1);
+
+  Handle<mirror::ClassLoader> class_loader_2 = hs.NewHandle(shared_libraries->Get(0));
+  std::vector<const DexFile*> class_loader_2_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_b);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_2,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_2_dex_files);
+
+  // Verify the shared library dependency of the shared library.
+  raw_shared_libraries = field->GetObject(class_loader_2.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries_2(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries_2->GetLength(), 1);
+
+  Handle<mirror::ClassLoader> class_loader_3 = hs.NewHandle(shared_libraries_2->Get(0));
+  std::vector<const DexFile*> class_loader_3_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_c);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_3,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_3_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_3.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // Verify the parent.
+  Handle<mirror::ClassLoader> class_loader_4 = hs.NewHandle(class_loader_1->GetParent());
+  std::vector<const DexFile*> class_loader_4_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_d);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_4,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_4_dex_files);
+  raw_shared_libraries = field->GetObject(class_loader_4.Get());
+  ASSERT_TRUE(raw_shared_libraries == nullptr);
+
+  // Class loaders should have the BootClassLoader as a parent.
+  ASSERT_TRUE(class_loader_2->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_3->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_4->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
 
 TEST_F(ClassLoaderContextTest, RemoveSourceLocations) {
   std::unique_ptr<ClassLoaderContext> context =
@@ -549,6 +935,91 @@
   VerifyClassLoaderPCL(context.get(), 0, "");
 }
 
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithSameSharedLibraries) {
+  // Setup the context.
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_b = OpenTestDexFiles("ForClassLoaderB");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_c = OpenTestDexFiles("ForClassLoaderC");
+
+  std::string context_spec =
+      "PCL[" + CreateClassPath(classpath_dex_a) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_b) + "]};" +
+      "PCL[" + CreateClassPath(classpath_dex_c) + "]{" +
+      "PCL[" + CreateClassPath(classpath_dex_b) + "]}";
+
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_spec);
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+  // Setup the compilation sources.
+  std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> compilation_sources_raw =
+      MakeNonOwningPointerVector(compilation_sources);
+
+  // Create the class loader.
+  jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+  ASSERT_TRUE(jclass_loader != nullptr);
+
+  // Verify the class loader.
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<6> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader_1 = hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> class_loader_1_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_a);
+  for (auto& dex : compilation_sources_raw) {
+    class_loader_1_dex_files.push_back(dex);
+  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_1,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_1_dex_files);
+
+  // Verify its shared library.
+  ArtField* field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+  ObjPtr<mirror::Object> raw_shared_libraries = field->GetObject(class_loader_1.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries->GetLength(), 1);
+
+  Handle<mirror::ClassLoader> class_loader_2 = hs.NewHandle(shared_libraries->Get(0));
+  std::vector<const DexFile*> class_loader_2_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_b);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_2,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_2_dex_files);
+
+  // Verify the parent.
+  Handle<mirror::ClassLoader> class_loader_3 = hs.NewHandle(class_loader_1->GetParent());
+  std::vector<const DexFile*> class_loader_3_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_c);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_3,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_3_dex_files);
+
+  // Verify its shared library is the same as the child.
+  raw_shared_libraries = field->GetObject(class_loader_3.Get());
+  ASSERT_TRUE(raw_shared_libraries != nullptr);
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries_2(
+      hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
+  ASSERT_EQ(shared_libraries_2->GetLength(), 1);
+  ASSERT_EQ(shared_libraries_2->Get(0), class_loader_2.Get());
+
+  // Class loaders should have the BootClassLoader as a parent.
+  ASSERT_TRUE(class_loader_2->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+  ASSERT_TRUE(class_loader_3->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
+
 TEST_F(ClassLoaderContextTest, EncodeInOatFile) {
   std::string dex1_name = GetTestDexFileName("Main");
   std::string dex2_name = GetTestDexFileName("MyClass");
@@ -662,6 +1133,73 @@
             ClassLoaderContext::VerificationResult::kMismatch);
 }
 
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchSpecial) {
+  std::string context_spec = "&";
+  std::unique_ptr<ClassLoaderContext> context = ParseContextWithChecksums(context_spec);
+  // Pretend that we successfully open the dex files to pass the DCHECKS.
+  // (as it's much easier to test all the corner cases without relying on actual dex files).
+  PretendContextOpenedDexFiles(context.get());
+
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(context_spec),
+            ClassLoaderContext::VerificationResult::kForcedToSkipChecks);
+}
+
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchWithSL) {
+  std::string context_spec =
+      "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+      ";DLC[c.dex*890]";
+  std::unique_ptr<ClassLoaderContext> context = ParseContextWithChecksums(context_spec);
+  // Pretend that we successfully open the dex files to pass the DCHECKS.
+  // (as it's much easier to test all the corner cases without relying on actual dex files).
+  PretendContextOpenedDexFiles(context.get());
+
+  VerifyContextSize(context.get(), 2);
+  VerifyClassLoaderPCL(context.get(), 0, "a.dex:b.dex");
+  VerifyClassLoaderDLC(context.get(), 1, "c.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "d.dex");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 1, "f.dex:g.dex");
+
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(context_spec),
+            ClassLoaderContext::VerificationResult::kVerifies);
+
+  std::string wrong_class_loader_type =
+      "PCL[a.dex*123:b.dex*456]{DLC[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_class_loader_type),
+            ClassLoaderContext::VerificationResult::kMismatch);
+
+  std::string wrong_class_loader_order =
+      "PCL[a.dex*123:b.dex*456]{PCL[f.dex#098:g.dex#999}#PCL[d.dex*321];PCL[e.dex*654]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_class_loader_order),
+            ClassLoaderContext::VerificationResult::kMismatch);
+
+  std::string wrong_classpath_order =
+      "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321];PCL[e.dex*654]#PCL[g.dex*999:f.dex*098]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_classpath_order),
+            ClassLoaderContext::VerificationResult::kMismatch);
+
+  std::string wrong_checksum =
+      "PCL[a.dex*123:b.dex*456]{PCL[d.dex*333];PCL[e.dex*654]#PCL[g.dex*999:f.dex*098]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_checksum),
+            ClassLoaderContext::VerificationResult::kMismatch);
+
+  std::string wrong_extra_class_loader =
+      "PCL[a.dex*123:b.dex*456]"
+      "{PCL[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999];PCL[i.dex#444]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_extra_class_loader),
+            ClassLoaderContext::VerificationResult::kMismatch);
+
+  std::string wrong_extra_classpath =
+      "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321:i.dex#444];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+      ";DLC[c.dex*890]";
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_extra_classpath),
+            ClassLoaderContext::VerificationResult::kMismatch);
+}
+
 TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchAfterEncoding) {
   jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
   jobject class_loader_b = LoadDexInDelegateLastClassLoader("ForClassLoaderB", class_loader_a);
@@ -694,4 +1232,30 @@
             ClassLoaderContext::VerificationResult::kVerifies);
 }
 
+TEST_F(ClassLoaderContextTest, CreateContextForClassLoaderWithSharedLibraries) {
+  jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> libraries = hs.NewHandle(
+    mirror::ObjectArray<mirror::ClassLoader>::Alloc(
+        soa.Self(),
+        GetClassRoot<mirror::ObjectArray<mirror::ClassLoader>>(),
+        1));
+  libraries->Set(0, soa.Decode<mirror::ClassLoader>(class_loader_a));
+
+  jobject class_loader_b = LoadDexInPathClassLoader(
+      "ForClassLoaderB", nullptr, soa.AddLocalReference<jobject>(libraries.Get()));
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader_b);
+  ASSERT_TRUE(context != nullptr);
+  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ForClassLoaderB");
+  VerifyClassLoaderPCL(context.get(), 0, dex_files[0]->GetLocation());
+  dex_files = OpenTestDexFiles("ForClassLoaderA");
+  VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, dex_files[0]->GetLocation());
+
+  ASSERT_EQ(context->VerifyClassLoaderContextMatch(context->EncodeContextForOatFile("")),
+            ClassLoaderContext::VerificationResult::kVerifies);
+}
+
 }  // namespace art
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 78ad568..562dc47 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_CLASS_LOADER_UTILS_H_
 
 #include "art_field-inl.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle_scope.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
@@ -29,7 +29,7 @@
 namespace art {
 
 // Returns true if the given class loader is either a PathClassLoader or a DexClassLoader.
-// (they both have the same behaviour with respect to class lockup order)
+// (they both have the same behaviour with respect to class lookup order)
 inline bool IsPathOrDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                    Handle<mirror::ClassLoader> class_loader)
     REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -41,6 +41,15 @@
           soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader));
 }
 
+// Returns true if the given class loader is an InMemoryDexClassLoader.
+inline bool IsInMemoryDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                     Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* class_loader_class = class_loader->GetClass();
+  return (class_loader_class ==
+      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_InMemoryDexClassLoader));
+}
+
 inline bool IsDelegateLastClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                       Handle<mirror::ClassLoader> class_loader)
     REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -160,7 +169,7 @@
   VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
                                                     class_loader,
                                                     helper,
-                                                    /* default */ nullptr);
+                                                    /* default= */ nullptr);
 }
 
 }  // namespace art
diff --git a/runtime/class_root.h b/runtime/class_root.h
index 19a78b1..1ff4845 100644
--- a/runtime/class_root.h
+++ b/runtime/class_root.h
@@ -17,7 +17,8 @@
 #ifndef ART_RUNTIME_CLASS_ROOT_H_
 #define ART_RUNTIME_CLASS_ROOT_H_
 
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "gc_root-inl.h"
 #include "mirror/class.h"
 #include "mirror/object_array-inl.h"
 #include "obj_ptr-inl.h"
@@ -100,6 +101,7 @@
   M(kLongArrayClass,                        "[J",                                         mirror::PrimitiveArray<int64_t>)                          \
   M(kShortArrayClass,                       "[S",                                         mirror::PrimitiveArray<int16_t>)                          \
   M(kJavaLangStackTraceElementArrayClass,   "[Ljava/lang/StackTraceElement;",             mirror::ObjectArray<mirror::StackTraceElement>)           \
+  M(kJavaLangClassLoaderArrayClass,         "[Ljava/lang/ClassLoader;",                   mirror::ObjectArray<mirror::ClassLoader>)                 \
   M(kDalvikSystemClassExt,                  "Ldalvik/system/ClassExt;",                   mirror::ClassExt)
 
 // Well known mirror::Class roots accessed via ClassLinker::GetClassRoots().
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 6b6fe34..a2cdb2c 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -19,6 +19,7 @@
 
 #include "class_table.h"
 
+#include "base/mutex-inl.h"
 #include "gc_root-inl.h"
 #include "mirror/class.h"
 #include "oat_file.h"
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index a233357..8d8e93a 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -57,12 +57,6 @@
   return nullptr;
 }
 
-// To take into account http://b/35845221
-#pragma clang diagnostic push
-#if __clang_major__ < 4
-#pragma clang diagnostic ignored "-Wunreachable-code"
-#endif
-
 mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
   WriterMutexLock mu(Thread::Current(), lock_);
   // Should only be updating latest table.
@@ -88,8 +82,6 @@
   return existing;
 }
 
-#pragma clang diagnostic pop
-
 size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
                                               const ClassSet& set) const {
   size_t count = 0;
diff --git a/runtime/class_table_test.cc b/runtime/class_table_test.cc
index fdf6ad1..2270662 100644
--- a/runtime/class_table_test.cc
+++ b/runtime/class_table_test.cc
@@ -24,7 +24,7 @@
 #include "gc/accounting/card_table-inl.h"
 #include "gc/heap.h"
 #include "handle_scope-inl.h"
-#include "mirror/class-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "obj_ptr.h"
 #include "scoped_thread_state_change-inl.h"
 
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index c29043e..1c95622 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -20,13 +20,15 @@
 #include "android-base/logging.h"
 #include "art_field.h"
 #include "art_method.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker.h"
 #include "dex/code_item_accessors.h"
+#include "dex/dex_file_structs.h"
 #include "dex/primitive.h"
 #include "handle_scope-inl.h"
 #include "instrumentation.h"
+#include "interpreter/interpreter.h"
 #include "interpreter/shadow_frame.h"
 #include "interpreter/unstarted_runtime.h"
 #include "jvalue-inl.h"
@@ -41,7 +43,7 @@
 
 namespace interpreter {
   void ArtInterpreterToInterpreterBridge(Thread* self,
-                                        const DexFile::CodeItem* code_item,
+                                        const dex::CodeItem* code_item,
                                         ShadowFrame* shadow_frame,
                                         JValue* result)
      REQUIRES_SHARED(Locks::mutator_lock_);
@@ -172,6 +174,14 @@
     if (UNLIKELY(self->IsExceptionPending())) {
       return false;
     }
+    if (shadow_frame.GetForcePopFrame()) {
+      // We need to check this here since we expect that the FieldWriteEvent happens before the
+      // actual field write. If one pops the stack we should not modify the field.  The next
+      // instruction will force a pop. Return true.
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+      DCHECK(interpreter::PrevFrameWillRetry(self, shadow_frame));
+      return true;
+    }
   }
 
   switch (field_type) {
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index be39631..a20baa0 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -24,7 +24,6 @@
 #include "nativehelper/scoped_local_ref.h"
 
 #include "android-base/stringprintf.h"
-#include <unicode/uvernum.h>
 
 #include "art_field-inl.h"
 #include "base/file_utils.h"
@@ -44,14 +43,17 @@
 #include "dex/dex_file_loader.h"
 #include "dex/primitive.h"
 #include "gc/heap.h"
+#include "gc/space/image_space.h"
 #include "gc_root-inl.h"
 #include "gtest/gtest.h"
 #include "handle_scope-inl.h"
 #include "interpreter/unstarted_runtime.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "native/dalvik_system_DexFile.h"
 #include "noop_compiler_callbacks.h"
 #include "runtime-inl.h"
@@ -59,20 +61,6 @@
 #include "thread.h"
 #include "well_known_classes.h"
 
-int main(int argc, char **argv) {
-  // Gtests can be very noisy. For example, an executable with multiple tests will trigger native
-  // bridge warnings. The following line reduces the minimum log severity to ERROR and suppresses
-  // everything else. In case you want to see all messages, comment out the line.
-  setenv("ANDROID_LOG_TAGS", "*:e", 1);
-
-  art::Locks::Init();
-  art::InitLogging(argv, art::Runtime::Abort);
-  art::MemMap::Init();
-  LOG(INFO) << "Running main() from common_runtime_test.cc...";
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
 namespace art {
 
 using android::base::StringPrintf;
@@ -123,15 +111,14 @@
   std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
   std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
 
-
   RuntimeOptions options;
-  std::string boot_class_path_string = "-Xbootclasspath";
-  for (const std::string &core_dex_file_name : GetLibCoreDexFileNames()) {
-    boot_class_path_string += ":";
-    boot_class_path_string += core_dex_file_name;
-  }
+  std::string boot_class_path_string =
+      GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames());
+  std::string boot_class_path_locations_string =
+      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations());
 
   options.push_back(std::make_pair(boot_class_path_string, nullptr));
+  options.push_back(std::make_pair(boot_class_path_locations_string, nullptr));
   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
   options.push_back(std::make_pair(min_heap_string, nullptr));
   options.push_back(std::make_pair(max_heap_string, nullptr));
@@ -151,7 +138,7 @@
   PreRuntimeCreate();
   if (!Runtime::Create(options, false)) {
     LOG(FATAL) << "Failed to create runtime";
-    return;
+    UNREACHABLE();
   }
   PostRuntimeCreate();
   runtime_.reset(Runtime::Current());
@@ -285,7 +272,8 @@
 
 jobject CommonRuntimeTestImpl::LoadDexInWellKnownClassLoader(const std::string& dex_name,
                                                              jclass loader_class,
-                                                             jobject parent_loader) {
+                                                             jobject parent_loader,
+                                                             jobject shared_libraries) {
   std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(dex_name.c_str());
   std::vector<const DexFile*> class_path;
   CHECK_NE(0U, dex_files.size());
@@ -300,7 +288,8 @@
       self,
       class_path,
       loader_class,
-      parent_loader);
+      parent_loader,
+      shared_libraries);
 
   {
     // Verify we build the correct chain.
@@ -327,10 +316,12 @@
 }
 
 jobject CommonRuntimeTestImpl::LoadDexInPathClassLoader(const std::string& dex_name,
-                                                        jobject parent_loader) {
+                                                        jobject parent_loader,
+                                                        jobject shared_libraries) {
   return LoadDexInWellKnownClassLoader(dex_name,
                                        WellKnownClasses::dalvik_system_PathClassLoader,
-                                       parent_loader);
+                                       parent_loader,
+                                       shared_libraries);
 }
 
 jobject CommonRuntimeTestImpl::LoadDexInDelegateLastClassLoader(const std::string& dex_name,
@@ -394,6 +385,38 @@
   }
 }
 
+bool CommonRuntimeTestImpl::StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
+                                                    /*out*/std::string* error_msg) {
+  DCHECK(argv != nullptr);
+  DCHECK(argv->empty());
+
+  Runtime* runtime = Runtime::Current();
+  const std::vector<gc::space::ImageSpace*>& image_spaces =
+      runtime->GetHeap()->GetBootImageSpaces();
+  if (image_spaces.empty()) {
+    *error_msg = "No image location found for Dex2Oat.";
+    return false;
+  }
+  std::string image_location = image_spaces[0]->GetImageLocation();
+
+  argv->push_back(runtime->GetCompilerExecutable());
+  if (runtime->IsJavaDebuggable()) {
+    argv->push_back("--debuggable");
+  }
+  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(argv);
+
+  argv->push_back("--runtime-arg");
+  argv->push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+  argv->push_back("--runtime-arg");
+  argv->push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
+
+  argv->push_back("--boot-image=" + image_location);
+
+  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+  argv->insert(argv->end(), compiler_options.begin(), compiler_options.end());
+  return true;
+}
+
 CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
   vm_->SetCheckJniAbortHook(Hook, &actual_);
 }
@@ -420,3 +443,26 @@
 }
 
 }  // namespace art
+
+// Allow other test code to run global initialization/configuration before
+// gtest infra takes over.
+extern "C"
+__attribute__((visibility("default"))) __attribute__((weak))
+void ArtTestGlobalInit() {
+  LOG(ERROR) << "ArtTestGlobalInit in common_runtime_test";
+}
+
+int main(int argc, char **argv) {
+  // Gtests can be very noisy. For example, an executable with multiple tests will trigger native
+  // bridge warnings. The following line reduces the minimum log severity to ERROR and suppresses
+  // everything else. In case you want to see all messages, comment out the line.
+  setenv("ANDROID_LOG_TAGS", "*:e", 1);
+
+  art::Locks::Init();
+  art::InitLogging(argv, art::Runtime::Abort);
+  art::MemMap::Init();
+  LOG(INFO) << "Running main() from common_runtime_test.cc...";
+  testing::InitGoogleTest(&argc, argv);
+  ArtTestGlobalInit();
+  return RUN_ALL_TESTS();
+}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index a5157df..d7f6127 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -26,14 +26,14 @@
 
 #include "arch/instruction_set.h"
 #include "base/common_art_test.h"
-#include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/compact_dex_level.h"
 // TODO: Add inl file and avoid including inl.
 #include "obj_ptr-inl.h"
+#include "runtime_globals.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -78,8 +78,8 @@
     const ArtDexFileLoader dex_file_loader;
     CHECK(dex_file_loader.Open(input_jar.c_str(),
                                input_jar.c_str(),
-                               /*verify*/ true,
-                               /*verify_checksum*/ true,
+                               /*verify=*/ true,
+                               /*verify_checksum=*/ true,
                                &error_msg,
                                &dex_files)) << error_msg;
     EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
@@ -97,6 +97,9 @@
     return true;
   }
 
+  static bool StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
+                                      /*out*/std::string* error_msg);
+
  protected:
   // Allow subclases such as CommonCompilerTest to add extra options.
   virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
@@ -115,11 +118,14 @@
   jobject LoadMultiDex(const char* first_dex_name, const char* second_dex_name)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  jobject LoadDexInPathClassLoader(const std::string& dex_name, jobject parent_loader);
+  jobject LoadDexInPathClassLoader(const std::string& dex_name,
+                                   jobject parent_loader,
+                                   jobject shared_libraries = nullptr);
   jobject LoadDexInDelegateLastClassLoader(const std::string& dex_name, jobject parent_loader);
   jobject LoadDexInWellKnownClassLoader(const std::string& dex_name,
                                         jclass loader_class,
-                                        jobject parent_loader);
+                                        jobject parent_loader,
+                                        jobject shared_libraries = nullptr);
 
   std::unique_ptr<Runtime> runtime_;
 
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 657a78b..62788b1 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -105,10 +105,10 @@
 }
 
 void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) {
-  ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr,
+  ThrowException("Ljava/lang/AbstractMethodError;", /* referrer= */ nullptr,
                  StringPrintf("abstract method \"%s\"",
                               dex_file.PrettyMethod(method_idx,
-                                                    /* with_signature */ true).c_str()).c_str());
+                                                    /* with_signature= */ true).c_str()).c_str());
 }
 
 // ArithmeticException
@@ -324,7 +324,7 @@
 void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
   DCHECK(method != nullptr);
   ThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                 /*referrer*/nullptr,
+                 /*referrer=*/nullptr,
                  StringPrintf("Conflicting default method implementations %s",
                               ArtMethod::PrettyMethod(method).c_str()).c_str());
 }
@@ -436,20 +436,15 @@
   ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
 }
 
-void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
-                                              InvokeType type) {
-  ObjPtr<mirror::DexCache> dex_cache =
-      Thread::Current()->GetCurrentMethod(nullptr)->GetDeclaringClass()->GetDexCache();
-  const DexFile& dex_file = *dex_cache->GetDexFile();
+void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx, InvokeType type) {
+  const DexFile& dex_file = *Thread::Current()->GetCurrentMethod(nullptr)->GetDexFile();
   ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
 }
 
-void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
-                                              InvokeType type) {
-  ObjPtr<mirror::DexCache> dex_cache = method->GetDeclaringClass()->GetDexCache();
-  const DexFile& dex_file = *dex_cache->GetDexFile();
+void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type) {
   ThrowNullPointerExceptionForMethodAccessImpl(method->GetDexMethodIndex(),
-                                               dex_file, type);
+                                               *method->GetDexFile(),
+                                               type);
 }
 
 static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) {
@@ -577,7 +572,7 @@
   CHECK_LT(throw_dex_pc, accessor.InsnsSizeInCodeUnits());
   const Instruction& instr = accessor.InstructionAt(throw_dex_pc);
   if (check_address && !IsValidImplicitCheck(addr, instr)) {
-    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+    const DexFile* dex_file = method->GetDexFile();
     LOG(FATAL) << "Invalid address for an implicit NullPointerException check: "
                << "0x" << std::hex << addr << std::dec
                << ", at "
@@ -633,7 +628,7 @@
       ArtField* field =
           Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
       break;
     }
     case Instruction::IGET_QUICK:
@@ -647,9 +642,9 @@
       ArtField* field = nullptr;
       CHECK_NE(field_idx, DexFile::kDexNoIndex16);
       field = Runtime::Current()->GetClassLinker()->ResolveField(
-          field_idx, method, /* is_static */ false);
+          field_idx, method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
       break;
     }
     case Instruction::IPUT:
@@ -660,9 +655,9 @@
     case Instruction::IPUT_CHAR:
     case Instruction::IPUT_SHORT: {
       ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField(
-          instr.VRegC_22c(), method, /* is_static */ false);
+          instr.VRegC_22c(), method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
       break;
     }
     case Instruction::IPUT_QUICK:
@@ -676,9 +671,9 @@
       ArtField* field = nullptr;
       CHECK_NE(field_idx, DexFile::kDexNoIndex16);
       field = Runtime::Current()->GetClassLinker()->ResolveField(
-          field_idx, method, /* is_static */ false);
+          field_idx, method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
       break;
     }
     case Instruction::AGET:
@@ -717,13 +712,12 @@
       break;
     }
     default: {
-      const DexFile* dex_file =
-          method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+      const DexFile* dex_file = method->GetDexFile();
       LOG(FATAL) << "NullPointerException at an unexpected instruction: "
                  << instr.DumpString(dex_file)
                  << " in "
                  << method->PrettyMethod();
-      break;
+      UNREACHABLE();
     }
   }
 }
@@ -771,13 +765,19 @@
 
   // Avoid running Java code for exception initialization.
   // TODO: Checks to make this a bit less brittle.
+  //
+  // Note: this lambda ensures that the destruction of the ScopedLocalRefs will run in the extended
+  //       stack, which is important for modes with larger stack sizes (e.g., ASAN). Using a lambda
+  //       instead of a block simplifies the control flow.
+  auto create_and_throw = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Allocate an uninitialized object.
+    ScopedLocalRef<jobject> exc(env,
+                                env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+    if (exc == nullptr) {
+      LOG(WARNING) << "Could not allocate StackOverflowError object.";
+      return;
+    }
 
-  std::string error_msg;
-
-  // Allocate an uninitialized object.
-  ScopedLocalRef<jobject> exc(env,
-                              env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
-  if (exc.get() != nullptr) {
     // "Initialize".
     // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
     // Only Throwable has "custom" fields:
@@ -793,57 +793,54 @@
     // detailMessage.
     // TODO: Use String::FromModifiedUTF...?
     ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
-    if (s.get() != nullptr) {
-      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
-
-      // cause.
-      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
-
-      // suppressedExceptions.
-      ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
-          WellKnownClasses::java_util_Collections,
-          WellKnownClasses::java_util_Collections_EMPTY_LIST));
-      CHECK(emptylist.get() != nullptr);
-      env->SetObjectField(exc.get(),
-                          WellKnownClasses::java_lang_Throwable_suppressedExceptions,
-                          emptylist.get());
-
-      // stackState is set as result of fillInStackTrace. fillInStackTrace calls
-      // nativeFillInStackTrace.
-      ScopedLocalRef<jobject> stack_state_val(env, nullptr);
-      {
-        ScopedObjectAccessUnchecked soa(env);
-        stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
-      }
-      if (stack_state_val.get() != nullptr) {
-        env->SetObjectField(exc.get(),
-                            WellKnownClasses::java_lang_Throwable_stackState,
-                            stack_state_val.get());
-
-        // stackTrace.
-        ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
-            WellKnownClasses::libcore_util_EmptyArray,
-            WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
-        env->SetObjectField(exc.get(),
-                            WellKnownClasses::java_lang_Throwable_stackTrace,
-                            stack_trace_elem.get());
-      } else {
-        error_msg = "Could not create stack trace.";
-      }
-      // Throw the exception.
-      self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
-    } else {
-      // Could not allocate a string object.
-      error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+    if (s == nullptr) {
+      LOG(WARNING) << "Could not throw new StackOverflowError because JNI NewStringUTF failed.";
+      return;
     }
-  } else {
-    error_msg = "Could not allocate StackOverflowError object.";
-  }
 
-  if (!error_msg.empty()) {
-    LOG(WARNING) << error_msg;
-    CHECK(self->IsExceptionPending());
-  }
+    env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
+
+    // cause.
+    env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
+
+    // suppressedExceptions.
+    ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+        WellKnownClasses::java_util_Collections,
+        WellKnownClasses::java_util_Collections_EMPTY_LIST));
+    CHECK(emptylist != nullptr);
+    env->SetObjectField(exc.get(),
+                        WellKnownClasses::java_lang_Throwable_suppressedExceptions,
+                        emptylist.get());
+
+    // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+    // nativeFillInStackTrace.
+    ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+    {
+      ScopedObjectAccessUnchecked soa(env);  // TODO: Is this necessary?
+      stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+    }
+    if (stack_state_val != nullptr) {
+      env->SetObjectField(exc.get(),
+                          WellKnownClasses::java_lang_Throwable_stackState,
+                          stack_state_val.get());
+
+      // stackTrace.
+      ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+          WellKnownClasses::libcore_util_EmptyArray,
+          WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
+      env->SetObjectField(exc.get(),
+                          WellKnownClasses::java_lang_Throwable_stackTrace,
+                          stack_trace_elem.get());
+    } else {
+      LOG(WARNING) << "Could not create stack trace.";
+      // Note: we'll create an exception without stack state, which is valid.
+    }
+
+    // Throw the exception.
+    self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
+  };
+  create_and_throw();
+  CHECK(self->IsExceptionPending());
 
   bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
   self->ResetDefaultStackEnd();  // Return to default stack size.
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 6acff6f..ca9c96a 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMMON_THROWS_H_
 #define ART_RUNTIME_COMMON_THROWS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 
 namespace art {
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 6855dcd..18632dc 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMPILER_CALLBACKS_H_
 #define ART_RUNTIME_COMPILER_CALLBACKS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/class_reference.h"
 #include "class_status.h"
 
@@ -51,10 +51,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual void ClassRejected(ClassReference ref) = 0;
 
-  // Return true if we should attempt to relocate to a random base address if we have not already
-  // done so. Return false if relocating in this way would be problematic.
-  virtual bool IsRelocationPossible() = 0;
-
   virtual verifier::VerifierDeps* GetVerifierDeps() const = 0;
   virtual void SetVerifierDeps(verifier::VerifierDeps* deps ATTRIBUTE_UNUSED) {}
 
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index bda64eb..c086490 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -16,6 +16,8 @@
 
 #include "compiler_filter.h"
 
+#include <ostream>
+
 #include "base/utils.h"
 
 namespace art {
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 012ebcb..c36e40f 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMPILER_FILTER_H_
 #define ART_RUNTIME_COMPILER_FILTER_H_
 
-#include <ostream>
+#include <iosfwd>
 #include <string>
 #include <vector>
 
diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc
index cb334b5..2939b00 100644
--- a/runtime/debug_print.cc
+++ b/runtime/debug_print.cc
@@ -37,7 +37,7 @@
   std::ostringstream oss;
   gc::Heap* heap = Runtime::Current()->GetHeap();
   gc::space::ContinuousSpace* cs =
-      heap->FindContinuousSpaceFromObject(klass, /* fail_ok */ true);
+      heap->FindContinuousSpaceFromObject(klass, /* fail_ok= */ true);
   if (cs != nullptr) {
     if (cs->IsImageSpace()) {
       gc::space::ImageSpace* ispace = cs->AsImageSpace();
@@ -50,7 +50,7 @@
     }
   } else {
     gc::space::DiscontinuousSpace* ds =
-        heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true);
+        heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok= */ true);
     if (ds != nullptr) {
       oss << "discontinuous;" << ds->GetName();
     } else {
diff --git a/runtime/debug_print.h b/runtime/debug_print.h
index df00f06..e2990d4 100644
--- a/runtime/debug_print.h
+++ b/runtime/debug_print.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_DEBUG_PRINT_H_
 #define ART_RUNTIME_DEBUG_PRINT_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/object.h"
 
 // Helper functions for printing extra information for certain hard to diagnose bugs.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 366b5ec..663af81 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -53,11 +53,14 @@
 #include "jdwp/object_registry.h"
 #include "jni/jni_internal.h"
 #include "jvalue-inl.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
 #include "nativehelper/scoped_local_ref.h"
@@ -65,6 +68,7 @@
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread_list.h"
@@ -256,17 +260,6 @@
                << " " << dex_pc << ", " << dex_pc_offset;
   }
 
-  // We only care about invokes in the Jit.
-  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
-                                Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                ArtMethod* method,
-                                uint32_t dex_pc,
-                                ArtMethod* target ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
-               << " " << dex_pc;
-  }
-
   // TODO Might be worth it to post ExceptionCatch event.
   void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
                         Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
@@ -688,18 +681,18 @@
   //
   // The performance cost of this is non-negligible during native-debugging due to the
   // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
+  ScopedSuspendAll ssa(__FUNCTION__);
   if (!runtime->IsJavaDebuggable() &&
       !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
       !runtime->IsNativeDebuggable()) {
     runtime->DeoptimizeBootImage();
   }
 
-  ScopedSuspendAll ssa(__FUNCTION__);
   if (RequiresDeoptimization()) {
     runtime->GetInstrumentation()->EnableDeoptimization();
   }
   instrumentation_events_ = 0;
-  gDebuggerActive = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = true; });
   Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
   LOG(INFO) << "Debugger is active";
 }
@@ -737,7 +730,7 @@
       if (RequiresDeoptimization()) {
         runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
       }
-      gDebuggerActive = false;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = false; });
       Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
           &gDebugActiveCallback);
     }
@@ -897,7 +890,7 @@
 
     // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
     // annotalysis.
-    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+    bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
       if (!GetMethod()->IsRuntimeMethod()) {
         Monitor::VisitLocks(this, AppendOwnedMonitors, this);
         ++current_stack_depth;
@@ -954,7 +947,7 @@
 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Class>> classes;
   counts->clear();
@@ -975,7 +968,7 @@
                                   std::vector<JDWP::ObjectId>* instances) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // We only want reachable instances, so do a GC.
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
   if (c == nullptr) {
@@ -985,7 +978,7 @@
   std::vector<Handle<mirror::Object>> raw_instances;
   Runtime::Current()->GetHeap()->GetInstances(hs,
                                               hs.NewHandle(c),
-                                              /* use_is_assignable_from */ false,
+                                              /* use_is_assignable_from= */ false,
                                               max_count,
                                               raw_instances);
   for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -997,7 +990,7 @@
 JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                          std::vector<JDWP::ObjectId>* referring_objects) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
   if (o == nullptr) {
@@ -1235,7 +1228,7 @@
     return 8;
   default:
     LOG(FATAL) << "Unknown tag " << tag;
-    return -1;
+    UNREACHABLE();
   }
 }
 
@@ -1667,18 +1660,6 @@
 }
 
 void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
-  struct DebugCallbackContext {
-    int numItems;
-    JDWP::ExpandBuf* pReply;
-
-    static bool Callback(void* context, const DexFile::PositionInfo& entry) {
-      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
-      expandBufAdd8BE(pContext->pReply, entry.address_);
-      expandBufAdd4BE(pContext->pReply, entry.line_);
-      pContext->numItems++;
-      return false;
-    }
-  };
   ArtMethod* m = FromMethodId(method_id);
   CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
   uint64_t start, end;
@@ -1699,52 +1680,19 @@
   size_t numLinesOffset = expandBufGetLength(pReply);
   expandBufAdd4BE(pReply, 0);
 
-  DebugCallbackContext context;
-  context.numItems = 0;
-  context.pReply = pReply;
+  int numItems = 0;
+  accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+    expandBufAdd8BE(pReply, entry.address_);
+    expandBufAdd4BE(pReply, entry.line_);
+    numItems++;
+    return false;
+  });
 
-  if (accessor.HasCodeItem()) {
-    m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(),
-                                             DebugCallbackContext::Callback,
-                                             &context);
-  }
-
-  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
+  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, numItems);
 }
 
 void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
                               JDWP::ExpandBuf* pReply) {
-  struct DebugCallbackContext {
-    ArtMethod* method;
-    JDWP::ExpandBuf* pReply;
-    size_t variable_count;
-    bool with_generic;
-
-    static void Callback(void* context, const DexFile::LocalInfo& entry)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
-
-      uint16_t slot = entry.reg_;
-      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
-                                 pContext->variable_count, entry.start_address_,
-                                 entry.end_address_ - entry.start_address_,
-                                 entry.name_, entry.descriptor_, entry.signature_, slot,
-                                 MangleSlot(slot, pContext->method));
-
-      slot = MangleSlot(slot, pContext->method);
-
-      expandBufAdd8BE(pContext->pReply, entry.start_address_);
-      expandBufAddUtf8String(pContext->pReply, entry.name_);
-      expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
-      if (pContext->with_generic) {
-        expandBufAddUtf8String(pContext->pReply, entry.signature_);
-      }
-      expandBufAdd4BE(pContext->pReply, entry.end_address_- entry.start_address_);
-      expandBufAdd4BE(pContext->pReply, slot);
-
-      ++pContext->variable_count;
-    }
-  };
   ArtMethod* m = FromMethodId(method_id);
   CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
 
@@ -1756,24 +1704,39 @@
   size_t variable_count_offset = expandBufGetLength(pReply);
   expandBufAdd4BE(pReply, 0);
 
-  DebugCallbackContext context;
-  context.method = m;
-  context.pReply = pReply;
-  context.variable_count = 0;
-  context.with_generic = with_generic;
+  size_t variable_count = 0;
 
   if (accessor.HasCodeItem()) {
-    m->GetDexFile()->DecodeDebugLocalInfo(accessor.RegistersSize(),
-                                          accessor.InsSize(),
-                                          accessor.InsnsSizeInCodeUnits(),
-                                          accessor.DebugInfoOffset(),
-                                          m->IsStatic(),
-                                          m->GetDexMethodIndex(),
-                                          DebugCallbackContext::Callback,
-                                          &context);
+    accessor.DecodeDebugLocalInfo(m->IsStatic(),
+                                  m->GetDexMethodIndex(),
+                                  [&](const DexFile::LocalInfo& entry)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      uint16_t slot = entry.reg_;
+      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
+                                 variable_count,
+                                 entry.start_address_,
+                                 entry.end_address_ - entry.start_address_,
+                                 entry.name_,
+                                 entry.descriptor_, entry.signature_,
+                                 slot,
+                                 MangleSlot(slot, m));
+
+      slot = MangleSlot(slot, m);
+
+      expandBufAdd8BE(pReply, entry.start_address_);
+      expandBufAddUtf8String(pReply, entry.name_);
+      expandBufAddUtf8String(pReply, entry.descriptor_);
+      if (with_generic) {
+        expandBufAddUtf8String(pReply, entry.signature_);
+      }
+      expandBufAdd4BE(pReply, entry.end_address_- entry.start_address_);
+      expandBufAdd4BE(pReply, slot);
+
+      ++variable_count;
+    });
   }
 
-  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
+  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, variable_count);
 }
 
 void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
@@ -2303,7 +2266,7 @@
       // Don't add a 'default' here so the compiler can spot incompatible enum changes.
   }
   LOG(FATAL) << "Unknown thread state: " << state;
-  return JDWP::TS_ZOMBIE;
+  UNREACHABLE();
 }
 
 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
@@ -2399,25 +2362,18 @@
 }
 
 static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
-  struct CountStackDepthVisitor : public StackVisitor {
-    explicit CountStackDepthVisitor(Thread* thread_in)
-        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-          depth(0) {}
-
-    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-    // annotalysis.
-    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
-      if (!GetMethod()->IsRuntimeMethod()) {
-        ++depth;
-      }
-      return true;
-    }
-    size_t depth;
-  };
-
-  CountStackDepthVisitor visitor(thread);
-  visitor.WalkStack();
-  return visitor.depth;
+  size_t depth = 0u;
+  StackVisitor::WalkStack(
+      [&depth](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (!visitor->GetMethod()->IsRuntimeMethod()) {
+          ++depth;
+        }
+        return true;
+      },
+      thread,
+      /* context= */ nullptr,
+      StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return depth;
 }
 
 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
@@ -2435,47 +2391,10 @@
   return JDWP::ERR_NONE;
 }
 
-JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
-                                     size_t frame_count, JDWP::ExpandBuf* buf) {
-  class GetFrameVisitor : public StackVisitor {
-   public:
-    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
-                    JDWP::ExpandBuf* buf_in)
-        REQUIRES_SHARED(Locks::mutator_lock_)
-        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-          depth_(0),
-          start_frame_(start_frame_in),
-          frame_count_(frame_count_in),
-          buf_(buf_in) {
-      expandBufAdd4BE(buf_, frame_count_);
-    }
-
-    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (GetMethod()->IsRuntimeMethod()) {
-        return true;  // The debugger can't do anything useful with a frame that has no Method*.
-      }
-      if (depth_ >= start_frame_ + frame_count_) {
-        return false;
-      }
-      if (depth_ >= start_frame_) {
-        JDWP::FrameId frame_id(GetFrameId());
-        JDWP::JdwpLocation location;
-        SetJdwpLocation(&location, GetMethod(), GetDexPc());
-        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
-        expandBufAdd8BE(buf_, frame_id);
-        expandBufAddLocation(buf_, location);
-      }
-      ++depth_;
-      return true;
-    }
-
-   private:
-    size_t depth_;
-    const size_t start_frame_;
-    const size_t frame_count_;
-    JDWP::ExpandBuf* buf_;
-  };
-
+JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id,
+                                     const size_t start_frame,
+                                     const size_t frame_count,
+                                     JDWP::ExpandBuf* buf) {
   ScopedObjectAccessUnchecked soa(Thread::Current());
   JDWP::JdwpError error;
   Thread* thread = DecodeThread(soa, thread_id, &error);
@@ -2485,8 +2404,34 @@
   if (!IsSuspendedForDebugger(soa, thread)) {
     return JDWP::ERR_THREAD_NOT_SUSPENDED;
   }
-  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
-  visitor.WalkStack();
+
+  expandBufAdd4BE(buf, frame_count);
+
+  size_t depth = 0u;
+  StackVisitor::WalkStack(
+      [&](StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (visitor->GetMethod()->IsRuntimeMethod()) {
+          return true;  // The debugger can't do anything useful with a frame that has no Method*.
+        }
+        if (depth >= start_frame + frame_count) {
+          return false;
+        }
+        if (depth >= start_frame) {
+          JDWP::FrameId frame_id(visitor->GetFrameId());
+          JDWP::JdwpLocation location;
+          SetJdwpLocation(&location, visitor->GetMethod(), visitor->GetDexPc());
+          VLOG(jdwp)
+              << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth, frame_id) << location;
+          expandBufAdd8BE(buf, frame_id);
+          expandBufAddLocation(buf, location);
+        }
+        ++depth;
+        return true;
+      },
+      thread,
+      /* context= */ nullptr,
+      StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
   return JDWP::ERR_NONE;
 }
 
@@ -2567,28 +2512,6 @@
   Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
 }
 
-struct GetThisVisitor : public StackVisitor {
-  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        this_object(nullptr),
-        frame_id(frame_id_in) {}
-
-  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-  // annotalysis.
-  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
-    if (frame_id != GetFrameId()) {
-      return true;  // continue
-    } else {
-      this_object = GetThisObject();
-      return false;
-    }
-  }
-
-  mirror::Object* this_object;
-  JDWP::FrameId frame_id;
-};
-
 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                    JDWP::ObjectId* result) {
   ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -2601,48 +2524,50 @@
     return JDWP::ERR_THREAD_NOT_SUSPENDED;
   }
   std::unique_ptr<Context> context(Context::Create());
-  GetThisVisitor visitor(thread, context.get(), frame_id);
-  visitor.WalkStack();
-  *result = gRegistry->Add(visitor.this_object);
+  mirror::Object* this_object = nullptr;
+  StackVisitor::WalkStack(
+      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (frame_id != stack_visitor->GetFrameId()) {
+          return true;  // continue
+        } else {
+          this_object = stack_visitor->GetThisObject();
+          return false;
+        }
+      },
+      thread,
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  *result = gRegistry->Add(this_object);
   return JDWP::ERR_NONE;
 }
 
-// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor final : public StackVisitor {
- public:
-  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        frame_id_(frame_id),
-        error_(JDWP::ERR_INVALID_FRAMEID) {}
-
-  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-  // annotalysis.
-  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
-    if (GetFrameId() != frame_id_) {
-      return true;  // Not our frame, carry on.
-    }
-    ArtMethod* m = GetMethod();
-    if (m->IsNative()) {
-      // We can't read/write local value from/into native method.
-      error_ = JDWP::ERR_OPAQUE_FRAME;
-    } else {
-      // We found our frame.
-      error_ = JDWP::ERR_NONE;
-    }
-    return false;
-  }
-
-  JDWP::JdwpError GetError() const {
-    return error_;
-  }
-
- private:
-  const JDWP::FrameId frame_id_;
-  JDWP::JdwpError error_;
-
-  DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
-};
+template <typename FrameHandler>
+static JDWP::JdwpError FindAndHandleNonNativeFrame(Thread* thread,
+                                                   JDWP::FrameId frame_id,
+                                                   const FrameHandler& handler)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  JDWP::JdwpError result = JDWP::ERR_INVALID_FRAMEID;
+  std::unique_ptr<Context> context(Context::Create());
+  StackVisitor::WalkStack(
+      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (stack_visitor->GetFrameId() != frame_id) {
+          return true;  // Not our frame, carry on.
+        }
+        ArtMethod* m = stack_visitor->GetMethod();
+        if (m->IsNative()) {
+          // We can't read/write local value from/into native method.
+          result = JDWP::ERR_OPAQUE_FRAME;
+        } else {
+          // We found our frame.
+          result = handler(stack_visitor);
+        }
+        return false;
+      },
+      thread,
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return result;
+}
 
 JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
   JDWP::ObjectId thread_id = request->ReadThreadId();
@@ -2657,31 +2582,29 @@
   if (!IsSuspendedForDebugger(soa, thread)) {
     return JDWP::ERR_THREAD_NOT_SUSPENDED;
   }
-  // Find the frame with the given frame_id.
-  std::unique_ptr<Context> context(Context::Create());
-  FindFrameVisitor visitor(thread, context.get(), frame_id);
-  visitor.WalkStack();
-  if (visitor.GetError() != JDWP::ERR_NONE) {
-    return visitor.GetError();
-  }
 
-  // Read the values from visitor's context.
-  int32_t slot_count = request->ReadSigned32("slot count");
-  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
-  for (int32_t i = 0; i < slot_count; ++i) {
-    uint32_t slot = request->ReadUnsigned32("slot");
-    JDWP::JdwpTag reqSigByte = request->ReadTag();
+  return FindAndHandleNonNativeFrame(
+      thread,
+      frame_id,
+      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        // Read the values from visitor's context.
+        int32_t slot_count = request->ReadSigned32("slot count");
+        expandBufAdd4BE(pReply, slot_count);     /* "int values" */
+        for (int32_t i = 0; i < slot_count; ++i) {
+          uint32_t slot = request->ReadUnsigned32("slot");
+          JDWP::JdwpTag reqSigByte = request->ReadTag();
 
-    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
+          VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
 
-    size_t width = Dbg::GetTagWidth(reqSigByte);
-    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
-    error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
-    if (error != JDWP::ERR_NONE) {
-      return error;
-    }
-  }
-  return JDWP::ERR_NONE;
+          size_t width = Dbg::GetTagWidth(reqSigByte);
+          uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
+          error = Dbg::GetLocalValue(*stack_visitor, soa, slot, reqSigByte, ptr, width);
+          if (error != JDWP::ERR_NONE) {
+            return error;
+          }
+        }
+        return JDWP::ERR_NONE;
+      });
 }
 
 constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
@@ -2828,29 +2751,27 @@
   if (!IsSuspendedForDebugger(soa, thread)) {
     return JDWP::ERR_THREAD_NOT_SUSPENDED;
   }
-  // Find the frame with the given frame_id.
-  std::unique_ptr<Context> context(Context::Create());
-  FindFrameVisitor visitor(thread, context.get(), frame_id);
-  visitor.WalkStack();
-  if (visitor.GetError() != JDWP::ERR_NONE) {
-    return visitor.GetError();
-  }
 
-  // Writes the values into visitor's context.
-  int32_t slot_count = request->ReadSigned32("slot count");
-  for (int32_t i = 0; i < slot_count; ++i) {
-    uint32_t slot = request->ReadUnsigned32("slot");
-    JDWP::JdwpTag sigByte = request->ReadTag();
-    size_t width = Dbg::GetTagWidth(sigByte);
-    uint64_t value = request->ReadValue(width);
+  return FindAndHandleNonNativeFrame(
+      thread,
+      frame_id,
+      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        // Writes the values into visitor's context.
+        int32_t slot_count = request->ReadSigned32("slot count");
+        for (int32_t i = 0; i < slot_count; ++i) {
+          uint32_t slot = request->ReadUnsigned32("slot");
+          JDWP::JdwpTag sigByte = request->ReadTag();
+          size_t width = Dbg::GetTagWidth(sigByte);
+          uint64_t value = request->ReadValue(width);
 
-    VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
-    error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
-    if (error != JDWP::ERR_NONE) {
-      return error;
-    }
-  }
-  return JDWP::ERR_NONE;
+          VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
+          error = Dbg::SetLocalValue(thread, *stack_visitor, slot, sigByte, value, width);
+          if (error != JDWP::ERR_NONE) {
+            return error;
+          }
+        }
+        return JDWP::ERR_NONE;
+      });
 }
 
 template<typename T>
@@ -3022,107 +2943,71 @@
   gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
 }
 
-/**
- * Finds the location where this exception will be caught. We search until we reach the top
- * frame, in which case this exception is considered uncaught.
- */
-class CatchLocationFinder : public StackVisitor {
- public:
-  CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-    : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-      exception_(exception),
-      handle_scope_(self),
-      this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
-      catch_method_(nullptr),
-      throw_method_(nullptr),
-      catch_dex_pc_(dex::kDexNoIndex),
-      throw_dex_pc_(dex::kDexNoIndex) {
-  }
-
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* method = GetMethod();
-    DCHECK(method != nullptr);
-    if (method->IsRuntimeMethod()) {
-      // Ignore callee save method.
-      DCHECK(method->IsCalleeSaveMethod());
-      return true;
-    }
-
-    uint32_t dex_pc = GetDexPc();
-    if (throw_method_ == nullptr) {
-      // First Java method found. It is either the method that threw the exception,
-      // or the Java native method that is reporting an exception thrown by
-      // native code.
-      this_at_throw_.Assign(GetThisObject());
-      throw_method_ = method;
-      throw_dex_pc_ = dex_pc;
-    }
-
-    if (dex_pc != dex::kDexNoIndex) {
-      StackHandleScope<1> hs(GetThread());
-      uint32_t found_dex_pc;
-      Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
-      bool unused_clear_exception;
-      found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
-      if (found_dex_pc != dex::kDexNoIndex) {
-        catch_method_ = method;
-        catch_dex_pc_ = found_dex_pc;
-        return false;  // End stack walk.
-      }
-    }
-    return true;  // Continue stack walk.
-  }
-
-  ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return catch_method_;
-  }
-
-  ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return throw_method_;
-  }
-
-  mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return this_at_throw_.Get();
-  }
-
-  uint32_t GetCatchDexPc() const {
-    return catch_dex_pc_;
-  }
-
-  uint32_t GetThrowDexPc() const {
-    return throw_dex_pc_;
-  }
-
- private:
-  const Handle<mirror::Throwable>& exception_;
-  StackHandleScope<1> handle_scope_;
-  MutableHandle<mirror::Object> this_at_throw_;
-  ArtMethod* catch_method_;
-  ArtMethod* throw_method_;
-  uint32_t catch_dex_pc_;
-  uint32_t throw_dex_pc_;
-
-  DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
-};
-
 void Dbg::PostException(mirror::Throwable* exception_object) {
   if (!IsDebuggerActive()) {
     return;
   }
   Thread* const self = Thread::Current();
-  StackHandleScope<1> handle_scope(self);
+  StackHandleScope<2> handle_scope(self);
   Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
+  MutableHandle<mirror::Object> this_at_throw = handle_scope.NewHandle<mirror::Object>(nullptr);
   std::unique_ptr<Context> context(Context::Create());
-  CatchLocationFinder clf(self, h_exception, context.get());
-  clf.WalkStack(/* include_transitions */ false);
-  JDWP::EventLocation exception_throw_location;
-  SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
-  JDWP::EventLocation exception_catch_location;
-  SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
 
-  gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
-                            clf.GetThisAtThrow());
+  ArtMethod* catch_method = nullptr;
+  ArtMethod* throw_method = nullptr;
+  uint32_t catch_dex_pc = dex::kDexNoIndex;
+  uint32_t throw_dex_pc = dex::kDexNoIndex;
+  StackVisitor::WalkStack(
+      /**
+       * Finds the location where this exception will be caught. We search until we reach the top
+       * frame, in which case this exception is considered uncaught.
+       */
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* method = stack_visitor->GetMethod();
+        DCHECK(method != nullptr);
+        if (method->IsRuntimeMethod()) {
+          // Ignore callee save method.
+          DCHECK(method->IsCalleeSaveMethod());
+          return true;
+        }
+
+        uint32_t dex_pc = stack_visitor->GetDexPc();
+        if (throw_method == nullptr) {
+          // First Java method found. It is either the method that threw the exception,
+          // or the Java native method that is reporting an exception thrown by
+          // native code.
+          this_at_throw.Assign(stack_visitor->GetThisObject());
+          throw_method = method;
+          throw_dex_pc = dex_pc;
+        }
+
+        if (dex_pc != dex::kDexNoIndex) {
+          StackHandleScope<1> hs(stack_visitor->GetThread());
+          uint32_t found_dex_pc;
+          Handle<mirror::Class> exception_class(hs.NewHandle(h_exception->GetClass()));
+          bool unused_clear_exception;
+          found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
+          if (found_dex_pc != dex::kDexNoIndex) {
+            catch_method = method;
+            catch_dex_pc = found_dex_pc;
+            return false;  // End stack walk.
+          }
+        }
+        return true;  // Continue stack walk.
+      },
+      self,
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
+  JDWP::EventLocation exception_throw_location;
+  SetEventLocation(&exception_throw_location, throw_method, throw_dex_pc);
+  JDWP::EventLocation exception_catch_location;
+  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
+
+  gJdwpState->PostException(&exception_throw_location,
+                            h_exception.Get(),
+                            &exception_catch_location,
+                            this_at_throw.Get());
 }
 
 void Dbg::PostClassPrepare(mirror::Class* c) {
@@ -3270,7 +3155,7 @@
       break;
     default:
       LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
-      break;
+      UNREACHABLE();
   }
 }
 
@@ -3348,7 +3233,7 @@
     }
     default: {
       LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
-      break;
+      UNREACHABLE();
     }
   }
 }
@@ -3686,56 +3571,6 @@
   return instrumentation->IsDeoptimized(m);
 }
 
-class NeedsDeoptimizationVisitor : public StackVisitor {
- public:
-  explicit NeedsDeoptimizationVisitor(Thread* self)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-    : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-      needs_deoptimization_(false) {}
-
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    // The visitor is meant to be used when handling exception from compiled code only.
-    CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
-                            << ArtMethod::PrettyMethod(GetMethod());
-    ArtMethod* method = GetMethod();
-    if (method == nullptr) {
-      // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
-      // so we can stop the visit.
-      DCHECK(!needs_deoptimization_);
-      return false;
-    }
-    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
-      // We found a compiled frame in the stack but instrumentation is set to interpret
-      // everything: we need to deoptimize.
-      needs_deoptimization_ = true;
-      return false;
-    }
-    if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
-      // We found a deoptimized method in the stack.
-      needs_deoptimization_ = true;
-      return false;
-    }
-    ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
-    if (frame != nullptr) {
-      // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
-      // deoptimize the stack to execute (and deallocate) this frame.
-      needs_deoptimization_ = true;
-      return false;
-    }
-    return true;
-  }
-
-  bool NeedsDeoptimization() const {
-    return needs_deoptimization_;
-  }
-
- private:
-  // Do we need to deoptimize the stack?
-  bool needs_deoptimization_;
-
-  DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
-};
-
 // Do we need to deoptimize the stack to handle an exception?
 bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
   const SingleStepControl* const ssc = thread->GetSingleStepControl();
@@ -3745,9 +3580,45 @@
   }
   // Deoptimization is required if at least one method in the stack needs it. However we
   // skip frames that will be unwound (thus not executed).
-  NeedsDeoptimizationVisitor visitor(thread);
-  visitor.WalkStack(true);  // includes upcall.
-  return visitor.NeedsDeoptimization();
+  bool needs_deoptimization = false;
+  StackVisitor::WalkStack(
+      [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        // The visitor is meant to be used when handling an exception from compiled code only.
+        CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frame: "
+                                         << ArtMethod::PrettyMethod(visitor->GetMethod());
+        ArtMethod* method = visitor->GetMethod();
+        if (method == nullptr) {
+          // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
+          // so we can stop the visit.
+          DCHECK(!needs_deoptimization);
+          return false;
+        }
+        if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+          // We found a compiled frame in the stack but instrumentation is set to interpret
+          // everything: we need to deoptimize.
+          needs_deoptimization = true;
+          return false;
+        }
+        if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
+          // We found a deoptimized method in the stack.
+          needs_deoptimization = true;
+          return false;
+        }
+        ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
+        if (frame != nullptr) {
+          // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
+          // deoptimize the stack to execute (and deallocate) this frame.
+          needs_deoptimization = true;
+          return false;
+        }
+        return true;
+      },
+      thread,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+      /* check_suspended= */ true,
+      /* include_transitions= */ true);
+  return needs_deoptimization;
 }
 
 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
@@ -3774,7 +3645,7 @@
           bool timed_out;
           ThreadList* const thread_list = Runtime::Current()->GetThreadList();
           suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
-                                                              /* request_suspension */ true,
+                                                              /* request_suspension= */ true,
                                                               SuspendReason::kForDebugger,
                                                               &timed_out);
         }
@@ -3831,7 +3702,7 @@
 
     // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
     // annotalysis.
-    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+    bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
       ArtMethod* m = GetMethod();
       if (!m->IsRuntimeMethod()) {
         ++stack_depth;
@@ -3855,50 +3726,6 @@
   SingleStepStackVisitor visitor(thread);
   visitor.WalkStack();
 
-  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
-  struct DebugCallbackContext {
-    DebugCallbackContext(SingleStepControl* single_step_control_cb,
-                         int32_t line_number_cb, uint32_t num_insns_in_code_units)
-        : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
-          num_insns_in_code_units_(num_insns_in_code_units), last_pc_valid(false), last_pc(0) {
-    }
-
-    static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
-      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
-      if (static_cast<int32_t>(entry.line_) == context->line_number_) {
-        if (!context->last_pc_valid) {
-          // Everything from this address until the next line change is ours.
-          context->last_pc = entry.address_;
-          context->last_pc_valid = true;
-        }
-        // Otherwise, if we're already in a valid range for this line,
-        // just keep going (shouldn't really happen)...
-      } else if (context->last_pc_valid) {  // and the line number is new
-        // Add everything from the last entry up until here to the set
-        for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
-          context->single_step_control_->AddDexPc(dex_pc);
-        }
-        context->last_pc_valid = false;
-      }
-      return false;  // There may be multiple entries for any given line.
-    }
-
-    ~DebugCallbackContext() {
-      // If the line number was the last in the position table...
-      if (last_pc_valid) {
-        for (uint32_t dex_pc = last_pc; dex_pc < num_insns_in_code_units_; ++dex_pc) {
-          single_step_control_->AddDexPc(dex_pc);
-        }
-      }
-    }
-
-    SingleStepControl* const single_step_control_;
-    const int32_t line_number_;
-    const uint32_t num_insns_in_code_units_;
-    bool last_pc_valid;
-    uint32_t last_pc;
-  };
-
   // Allocate single step.
   SingleStepControl* single_step_control =
       new (std::nothrow) SingleStepControl(step_size, step_depth,
@@ -3914,10 +3741,33 @@
   // method on the stack (and no line number either).
   if (m != nullptr && !m->IsNative()) {
     CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
-    DebugCallbackContext context(single_step_control, line_number, accessor.InsnsSizeInCodeUnits());
-    m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(),
-                                             DebugCallbackContext::Callback,
-                                             &context);
+    bool last_pc_valid = false;
+    uint32_t last_pc = 0u;
+    // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
+    accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+      if (static_cast<int32_t>(entry.line_) == line_number) {
+        if (!last_pc_valid) {
+          // Everything from this address until the next line change is ours.
+          last_pc = entry.address_;
+          last_pc_valid = true;
+        }
+        // Otherwise, if we're already in a valid range for this line,
+        // just keep going (shouldn't really happen)...
+      } else if (last_pc_valid) {  // and the line number is new
+        // Add everything from the last entry up until here to the set
+        for (uint32_t dex_pc = last_pc; dex_pc < entry.address_; ++dex_pc) {
+          single_step_control->AddDexPc(dex_pc);
+        }
+        last_pc_valid = false;
+      }
+      return false;  // There may be multiple entries for any given line.
+    });
+    // If the line number was the last in the position table...
+    if (last_pc_valid) {
+      for (uint32_t dex_pc = last_pc; dex_pc < accessor.InsnsSizeInCodeUnits(); ++dex_pc) {
+        single_step_control->AddDexPc(dex_pc);
+      }
+    }
   }
 
   // Activate single-step in the thread.
@@ -4077,7 +3927,7 @@
       StackHandleScope<2> hs(soa.Self());
       HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
       HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
-      const DexFile::TypeList* types = m->GetParameterTypeList();
+      const dex::TypeList* types = m->GetParameterTypeList();
       for (size_t i = 0; i < arg_count; ++i) {
         if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
           return JDWP::ERR_ILLEGAL_ARGUMENT;
@@ -4806,7 +4656,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (ProcessRecord(start, used_bytes)) {
       uint8_t state = ExamineNativeObject(start);
-      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
+      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
     }
   }
@@ -4818,7 +4668,7 @@
       // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
       // If it's the same, we should combine them.
       uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
-      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
+      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
     }
   }
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index b87bf8d..050be4a 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -22,15 +22,22 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/sdk_version.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "dex/dex_file-inl.h"
+#include "dex/dex_instruction-inl.h"
 #include "jni/jni_internal.h"
 #include "jvalue-inl.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
+#include "quicken_info.h"
 #include "reflection.h"
 #include "thread.h"
 #include "well_known_classes.h"
@@ -39,6 +46,15 @@
 
 using android::base::StringPrintf;
 
+using dex::AnnotationItem;
+using dex::AnnotationSetItem;
+using dex::AnnotationSetRefItem;
+using dex::AnnotationSetRefList;
+using dex::AnnotationsDirectoryItem;
+using dex::FieldAnnotationsItem;
+using dex::MethodAnnotationsItem;
+using dex::ParameterAnnotationsItem;
+
 struct DexFile::AnnotationValue {
   JValue value_;
   uint8_t type_;
@@ -70,7 +86,7 @@
     return dex_file_;
   }
 
-  const DexFile::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) {
+  const dex::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return class_def_;
   }
 
@@ -102,7 +118,7 @@
   ClassData(Handle<mirror::Class> klass,
             ArtMethod* method,
             const DexFile& dex_file,
-            const DexFile::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_)
+            const dex::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_)
       : real_klass_(klass),
         method_(method),
         dex_file_(dex_file),
@@ -113,7 +129,7 @@
   Handle<mirror::Class> real_klass_;
   ArtMethod* method_;
   const DexFile& dex_file_;
-  const DexFile::ClassDef* class_def_;
+  const dex::ClassDef* class_def_;
 
   DISALLOW_COPY_AND_ASSIGN(ClassData);
 };
@@ -125,51 +141,53 @@
 
 bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) {
   if (expected == DexFile::kDexVisibilityRuntime) {
-    int32_t sdk_version = Runtime::Current()->GetTargetSdkVersion();
-    if (sdk_version > 0 && sdk_version <= 23) {
+    if (IsSdkVersionSetAndAtMost(Runtime::Current()->GetTargetSdkVersion(), SdkVersion::kM)) {
       return actual == DexFile::kDexVisibilityRuntime || actual == DexFile::kDexVisibilityBuild;
     }
   }
   return actual == expected;
 }
 
-const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
+static const AnnotationSetItem* FindAnnotationSetForField(const DexFile& dex_file,
+                                                          const dex::ClassDef& class_def,
+                                                          uint32_t field_index)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile* dex_file = field->GetDexFile();
-  ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
-  const DexFile::ClassDef* class_def = klass->GetClassDef();
-  if (class_def == nullptr) {
-    DCHECK(klass->IsProxyClass());
-    return nullptr;
-  }
-  const DexFile::AnnotationsDirectoryItem* annotations_dir =
-      dex_file->GetAnnotationsDirectory(*class_def);
+  const AnnotationsDirectoryItem* annotations_dir = dex_file.GetAnnotationsDirectory(class_def);
   if (annotations_dir == nullptr) {
     return nullptr;
   }
-  const DexFile::FieldAnnotationsItem* field_annotations =
-      dex_file->GetFieldAnnotations(annotations_dir);
+  const FieldAnnotationsItem* field_annotations = dex_file.GetFieldAnnotations(annotations_dir);
   if (field_annotations == nullptr) {
     return nullptr;
   }
-  uint32_t field_index = field->GetDexFieldIndex();
   uint32_t field_count = annotations_dir->fields_size_;
   for (uint32_t i = 0; i < field_count; ++i) {
     if (field_annotations[i].field_idx_ == field_index) {
-      return dex_file->GetFieldAnnotationSetItem(field_annotations[i]);
+      return dex_file.GetFieldAnnotationSetItem(field_annotations[i]);
     }
   }
   return nullptr;
 }
 
-const DexFile::AnnotationItem* SearchAnnotationSet(const DexFile& dex_file,
-                                                   const DexFile::AnnotationSetItem* annotation_set,
-                                                   const char* descriptor,
-                                                   uint32_t visibility)
+static const AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::AnnotationItem* result = nullptr;
+  ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
+  const dex::ClassDef* class_def = klass->GetClassDef();
+  if (class_def == nullptr) {
+    DCHECK(klass->IsProxyClass());
+    return nullptr;
+  }
+  return FindAnnotationSetForField(*field->GetDexFile(), *class_def, field->GetDexFieldIndex());
+}
+
+const AnnotationItem* SearchAnnotationSet(const DexFile& dex_file,
+                                          const AnnotationSetItem* annotation_set,
+                                          const char* descriptor,
+                                          uint32_t visibility)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const AnnotationItem* result = nullptr;
   for (uint32_t i = 0; i < annotation_set->size_; ++i) {
-    const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+    const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
     if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) {
       continue;
     }
@@ -209,7 +227,7 @@
     case DexFile::kDexAnnotationArray:
     {
       uint32_t size = DecodeUnsignedLeb128(&annotation);
-      while (size--) {
+      for (; size != 0u; --size) {
         if (!SkipAnnotationValue(dex_file, &annotation)) {
           return false;
         }
@@ -221,7 +239,7 @@
     {
       DecodeUnsignedLeb128(&annotation);  // unused type_index
       uint32_t size = DecodeUnsignedLeb128(&annotation);
-      while (size--) {
+      for (; size != 0u; --size) {
         DecodeUnsignedLeb128(&annotation);  // unused element_name_index
         if (!SkipAnnotationValue(dex_file, &annotation)) {
           return false;
@@ -236,7 +254,7 @@
       break;
     default:
       LOG(FATAL) << StringPrintf("Bad annotation element value byte 0x%02x", value_type);
-      return false;
+      UNREACHABLE();
   }
 
   annotation += width;
@@ -264,16 +282,14 @@
   return nullptr;
 }
 
-const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(const DexFile& dex_file,
-                                                             const DexFile::ClassDef& class_def,
-                                                             uint32_t method_index) {
-  const DexFile::AnnotationsDirectoryItem* annotations_dir =
-      dex_file.GetAnnotationsDirectory(class_def);
+static const AnnotationSetItem* FindAnnotationSetForMethod(const DexFile& dex_file,
+                                                           const dex::ClassDef& class_def,
+                                                           uint32_t method_index) {
+  const AnnotationsDirectoryItem* annotations_dir = dex_file.GetAnnotationsDirectory(class_def);
   if (annotations_dir == nullptr) {
     return nullptr;
   }
-  const DexFile::MethodAnnotationsItem* method_annotations =
-      dex_file.GetMethodAnnotations(annotations_dir);
+  const MethodAnnotationsItem* method_annotations = dex_file.GetMethodAnnotations(annotations_dir);
   if (method_annotations == nullptr) {
     return nullptr;
   }
@@ -286,7 +302,7 @@
   return nullptr;
 }
 
-inline const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
+inline const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (method->IsProxyMethod()) {
     return nullptr;
@@ -296,15 +312,15 @@
                                     method->GetDexMethodIndex());
 }
 
-const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method)
+const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile* dex_file = method->GetDexFile();
-  const DexFile::AnnotationsDirectoryItem* annotations_dir =
+  const AnnotationsDirectoryItem* annotations_dir =
       dex_file->GetAnnotationsDirectory(method->GetClassDef());
   if (annotations_dir == nullptr) {
     return nullptr;
   }
-  const DexFile::ParameterAnnotationsItem* parameter_annotations =
+  const ParameterAnnotationsItem* parameter_annotations =
       dex_file->GetParameterAnnotations(annotations_dir);
   if (parameter_annotations == nullptr) {
     return nullptr;
@@ -319,16 +335,15 @@
   return nullptr;
 }
 
-const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
+static const AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
-  const DexFile::ClassDef* class_def = klass.GetClassDef();
+  const dex::ClassDef* class_def = klass.GetClassDef();
   if (class_def == nullptr) {
     DCHECK(klass.GetRealClass()->IsProxyClass());
     return nullptr;
   }
-  const DexFile::AnnotationsDirectoryItem* annotations_dir =
-      dex_file.GetAnnotationsDirectory(*class_def);
+  const AnnotationsDirectoryItem* annotations_dir = dex_file.GetAnnotationsDirectory(*class_def);
   if (annotations_dir == nullptr) {
     return nullptr;
   }
@@ -764,15 +779,14 @@
   return new_member.Get();
 }
 
-const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
-    const ClassData& klass,
-    const DexFile::AnnotationSetItem* annotation_set,
-    uint32_t visibility,
-    Handle<mirror::Class> annotation_class)
+const AnnotationItem* GetAnnotationItemFromAnnotationSet(const ClassData& klass,
+                                                         const AnnotationSetItem* annotation_set,
+                                                         uint32_t visibility,
+                                                         Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   for (uint32_t i = 0; i < annotation_set->size_; ++i) {
-    const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+    const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
     if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) {
       continue;
     }
@@ -801,13 +815,12 @@
   return nullptr;
 }
 
-ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
-    const ClassData& klass,
-    const DexFile::AnnotationSetItem* annotation_set,
-    uint32_t visibility,
-    Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(const ClassData& klass,
+                                                            const AnnotationSetItem* annotation_set,
+                                                            uint32_t visibility,
+                                                            Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+  const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       klass, annotation_set, visibility, annotation_class);
   if (annotation_item == nullptr) {
     return nullptr;
@@ -817,7 +830,7 @@
 }
 
 ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
-                                          const DexFile::AnnotationItem* annotation_item,
+                                          const AnnotationItem* annotation_item,
                                           const char* annotation_name,
                                           Handle<mirror::Class> array_class,
                                           uint32_t expected_type)
@@ -851,11 +864,11 @@
 
 static ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureValue(
     const ClassData& klass,
-    const DexFile::AnnotationSetItem* annotation_set)
+    const AnnotationSetItem* annotation_set)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   StackHandleScope<1> hs(Thread::Current());
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Signature;",
                           DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
@@ -873,12 +886,11 @@
   return obj->AsObjectArray<mirror::String>();
 }
 
-ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
-    const ClassData& klass,
-    const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(const ClassData& klass,
+                                                          const AnnotationSetItem* annotation_set)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;",
                           DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
@@ -899,7 +911,7 @@
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
     const ClassData& klass,
-    const DexFile::AnnotationSetItem* annotation_set,
+    const AnnotationSetItem* annotation_set,
     uint32_t visibility)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
@@ -921,7 +933,7 @@
 
   uint32_t dest_index = 0;
   for (uint32_t i = 0; i < size; ++i) {
-    const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+    const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
     // Note that we do not use IsVisibilityCompatible here because older code
     // was correct for this case.
     if (annotation_item->visibility_ != visibility) {
@@ -957,7 +969,7 @@
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
     const ClassData& klass,
-    const DexFile::AnnotationSetRefList* set_ref_list,
+    const AnnotationSetRefList* set_ref_list,
     uint32_t size)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
@@ -978,8 +990,8 @@
     return nullptr;
   }
   for (uint32_t index = 0; index < size; ++index) {
-    const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
-    const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
+    const AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
+    const AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
     ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
                                                                  set_item,
                                                                  DexFile::kDexVisibilityRuntime);
@@ -996,7 +1008,7 @@
 
 ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
                                              Handle<mirror::Class> annotation_class) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1009,14 +1021,14 @@
 }
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   StackHandleScope<1> hs(Thread::Current());
   const ClassData field_class(hs, field);
   return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime);
 }
 
 ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForField(ArtField* field) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1026,13 +1038,13 @@
 }
 
 bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
     return false;
   }
   StackHandleScope<1> hs(Thread::Current());
   const ClassData field_class(hs, field);
-  const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+  const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       field_class, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
   return annotation_item != nullptr;
 }
@@ -1040,17 +1052,17 @@
 ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
   const ClassData klass(method);
   const DexFile* dex_file = &klass.GetDexFile();
-  const DexFile::AnnotationsDirectoryItem* annotations_dir =
+  const AnnotationsDirectoryItem* annotations_dir =
       dex_file->GetAnnotationsDirectory(*klass.GetClassDef());
   if (annotations_dir == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationSetItem* annotation_set =
+  const AnnotationSetItem* annotation_set =
       dex_file->GetClassAnnotationSet(annotations_dir);
   if (annotation_set == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(*dex_file, annotation_set,
+  const AnnotationItem* annotation_item = SearchAnnotationSet(*dex_file, annotation_set,
       "Ldalvik/annotation/AnnotationDefault;", DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
     return nullptr;
@@ -1083,7 +1095,7 @@
 
 ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
                                               Handle<mirror::Class> annotation_class) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1092,14 +1104,14 @@
 }
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   return ProcessAnnotationSet(ClassData(method),
                               annotation_set,
                               DexFile::kDexVisibilityRuntime);
 }
 
 ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1108,12 +1120,12 @@
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
   const DexFile* dex_file = method->GetDexFile();
-  const DexFile::ParameterAnnotationsItem* parameter_annotations =
+  const ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
   if (parameter_annotations == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationSetRefList* set_ref_list =
+  const AnnotationSetRefList* set_ref_list =
       dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
   if (set_ref_list == nullptr) {
     return nullptr;
@@ -1124,12 +1136,12 @@
 
 uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method) {
   const DexFile* dex_file = method->GetDexFile();
-  const DexFile::ParameterAnnotationsItem* parameter_annotations =
+  const ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
   if (parameter_annotations == nullptr) {
     return 0u;
   }
-  const DexFile::AnnotationSetRefList* set_ref_list =
+  const AnnotationSetRefList* set_ref_list =
       dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
   if (set_ref_list == nullptr) {
     return 0u;
@@ -1141,12 +1153,11 @@
                                                        uint32_t parameter_idx,
                                                        Handle<mirror::Class> annotation_class) {
   const DexFile* dex_file = method->GetDexFile();
-  const DexFile::ParameterAnnotationsItem* parameter_annotations =
-      FindAnnotationsItemForMethod(method);
+  const ParameterAnnotationsItem* parameter_annotations = FindAnnotationsItemForMethod(method);
   if (parameter_annotations == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationSetRefList* set_ref_list =
+  const AnnotationSetRefList* set_ref_list =
       dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
   if (set_ref_list == nullptr) {
     return nullptr;
@@ -1154,8 +1165,8 @@
   if (parameter_idx >= set_ref_list->size_) {
     return nullptr;
   }
-  const DexFile::AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx];
-  const DexFile::AnnotationSetItem* annotation_set =
+  const AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx];
+  const AnnotationSetItem* annotation_set =
      dex_file->GetSetRefItemItem(annotation_set_ref);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1170,14 +1181,14 @@
     ArtMethod* method,
     /*out*/ MutableHandle<mirror::ObjectArray<mirror::String>>* names,
     /*out*/ MutableHandle<mirror::IntArray>* access_flags) {
-  const DexFile::AnnotationSetItem* annotation_set =
+  const AnnotationSetItem* annotation_set =
       FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return false;
   }
 
   const DexFile* dex_file = method->GetDexFile();
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(*dex_file,
                           annotation_set,
                           "Ldalvik/annotation/MethodParameters;",
@@ -1224,7 +1235,7 @@
 }
 
 ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForMethod(ArtMethod* method) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1234,11 +1245,11 @@
 bool IsMethodAnnotationPresent(ArtMethod* method,
                                Handle<mirror::Class> annotation_class,
                                uint32_t visibility /* = DexFile::kDexVisibilityRuntime */) {
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return false;
   }
-  const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+  const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       ClassData(method), annotation_set, visibility, annotation_class);
   return annotation_item != nullptr;
 }
@@ -1251,7 +1262,7 @@
     // WellKnownClasses may not be initialized yet, so `klass` may be null.
     if (klass != nullptr) {
       // Lookup using the boot class path loader should yield the annotation class.
-      CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+      CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader= */ nullptr));
     }
   }
 }
@@ -1259,11 +1270,11 @@
 // Check whether a method from the `dex_file` with the given `annotation_set`
 // is annotated with `annotation_descriptor` with build visibility.
 static bool IsMethodBuildAnnotationPresent(const DexFile& dex_file,
-                                           const DexFile::AnnotationSetItem& annotation_set,
+                                           const AnnotationSetItem& annotation_set,
                                            const char* annotation_descriptor,
                                            jclass annotation_class) {
   for (uint32_t i = 0; i < annotation_set.size_; ++i) {
-    const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(&annotation_set, i);
+    const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(&annotation_set, i);
     if (!IsVisibilityCompatible(annotation_item->visibility_, DexFile::kDexVisibilityBuild)) {
       continue;
     }
@@ -1279,9 +1290,9 @@
 }
 
 uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
-                                              const DexFile::ClassDef& class_def,
+                                              const dex::ClassDef& class_def,
                                               uint32_t method_index) {
-  const DexFile::AnnotationSetItem* annotation_set =
+  const dex::AnnotationSetItem* annotation_set =
       FindAnnotationSetForMethod(dex_file, class_def, method_index);
   if (annotation_set == nullptr) {
     return 0u;
@@ -1305,10 +1316,195 @@
   return access_flags;
 }
 
+bool FieldIsReachabilitySensitive(const DexFile& dex_file,
+                                  const dex::ClassDef& class_def,
+                                  uint32_t field_index)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const AnnotationSetItem* annotation_set =
+      FindAnnotationSetForField(dex_file, class_def, field_index);
+  if (annotation_set == nullptr) {
+    return false;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set,
+      "Ldalvik/annotation/optimization/ReachabilitySensitive;", DexFile::kDexVisibilityRuntime);
+  // TODO: We're missing the equivalent of DCheckNativeAnnotation (not a DCHECK). Does it matter?
+  return annotation_item != nullptr;
+}
+
+bool MethodIsReachabilitySensitive(const DexFile& dex_file,
+                                   const dex::ClassDef& class_def,
+                                   uint32_t method_index)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const AnnotationSetItem* annotation_set =
+      FindAnnotationSetForMethod(dex_file, class_def, method_index);
+  if (annotation_set == nullptr) {
+    return false;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set,
+      "Ldalvik/annotation/optimization/ReachabilitySensitive;", DexFile::kDexVisibilityRuntime);
+  return annotation_item != nullptr;
+}
+
+static bool MethodIsReachabilitySensitive(const DexFile& dex_file,
+                                               uint32_t method_index)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(method_index < dex_file.NumMethodIds());
+  const dex::MethodId& method_id = dex_file.GetMethodId(method_index);
+  dex::TypeIndex class_index = method_id.class_idx_;
+  const dex::ClassDef * class_def = dex_file.FindClassDef(class_index);
+  return class_def != nullptr
+         && MethodIsReachabilitySensitive(dex_file, *class_def, method_index);
+}
+
+bool MethodContainsRSensitiveAccess(const DexFile& dex_file,
+                                    const dex::ClassDef& class_def,
+                                    uint32_t method_index)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // TODO: This is too slow to run very regularly. Currently this is only invoked in the
+  // presence of @DeadReferenceSafe, which will be rare. In the long run, we need to quickly
+  // check once whether a class has any @ReachabilitySensitive annotations. If not, we can
+  // immediately return false here for any method in that class.
+  uint32_t code_item_offset = dex_file.FindCodeItemOffset(class_def, method_index);
+  const dex::CodeItem* code_item = dex_file.GetCodeItem(code_item_offset);
+  CodeItemInstructionAccessor accessor(dex_file, code_item);
+  if (!accessor.HasCodeItem()) {
+    return false;
+  }
+  ArrayRef<const uint8_t> quicken_data;
+  const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+  if (oat_dex_file != nullptr) {
+    quicken_data = oat_dex_file->GetQuickenedInfoOf(dex_file, method_index);
+  }
+  const QuickenInfoTable quicken_info(quicken_data);
+  uint32_t quicken_index = 0;
+  for (DexInstructionIterator iter = accessor.begin(); iter != accessor.end(); ++iter) {
+    switch (iter->Opcode()) {
+      case Instruction::IGET:
+      case Instruction::IGET_QUICK:
+      case Instruction::IGET_WIDE:
+      case Instruction::IGET_WIDE_QUICK:
+      case Instruction::IGET_OBJECT:
+      case Instruction::IGET_OBJECT_QUICK:
+      case Instruction::IGET_BOOLEAN:
+      case Instruction::IGET_BOOLEAN_QUICK:
+      case Instruction::IGET_BYTE:
+      case Instruction::IGET_BYTE_QUICK:
+      case Instruction::IGET_CHAR:
+      case Instruction::IGET_CHAR_QUICK:
+      case Instruction::IGET_SHORT:
+      case Instruction::IGET_SHORT_QUICK:
+      case Instruction::IPUT:
+      case Instruction::IPUT_QUICK:
+      case Instruction::IPUT_WIDE:
+      case Instruction::IPUT_WIDE_QUICK:
+      case Instruction::IPUT_OBJECT:
+      case Instruction::IPUT_OBJECT_QUICK:
+      case Instruction::IPUT_BOOLEAN:
+      case Instruction::IPUT_BOOLEAN_QUICK:
+      case Instruction::IPUT_BYTE:
+      case Instruction::IPUT_BYTE_QUICK:
+      case Instruction::IPUT_CHAR:
+      case Instruction::IPUT_CHAR_QUICK:
+      case Instruction::IPUT_SHORT:
+      case Instruction::IPUT_SHORT_QUICK:
+        {
+          uint32_t field_index;
+          if (iter->IsQuickened()) {
+            field_index = quicken_info.GetData(quicken_index);
+          } else {
+            field_index = iter->VRegC_22c();
+          }
+          DCHECK(field_index < dex_file.NumFieldIds());
+          // We only guarantee to pay attention to the annotation if it's in the same class,
+          // or a containing class, but it's OK to do so in other cases.
+          const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
+          dex::TypeIndex class_index = field_id.class_idx_;
+          const dex::ClassDef * field_class_def = dex_file.FindClassDef(class_index);
+          // We do not handle the case in which the field is declared in a superclass, and
+          // don't claim to do so. The annotated field should normally be private.
+          if (field_class_def != nullptr
+              && FieldIsReachabilitySensitive(dex_file, *field_class_def, field_index)) {
+            return true;
+          }
+        }
+        break;
+      case Instruction::INVOKE_SUPER:
+        // Cannot call method in same class. TODO: Try an explicit superclass lookup for
+        // better "best effort"?
+        break;
+      case Instruction::INVOKE_INTERFACE:
+        // We handle an interface call just like a virtual call. We will find annotations
+        // on interface methods/fields visible to us, but not if the annotation is in a
+        // super-interface. Again, we could just ignore it.
+      case Instruction::INVOKE_VIRTUAL:
+      case Instruction::INVOKE_DIRECT:
+        {
+          uint32_t called_method_index = iter->VRegB_35c();
+          if (MethodIsReachabilitySensitive(dex_file, called_method_index)) {
+            return true;
+          }
+        }
+        break;
+      case Instruction::INVOKE_INTERFACE_RANGE:
+      case Instruction::INVOKE_VIRTUAL_RANGE:
+      case Instruction::INVOKE_DIRECT_RANGE:
+        {
+          uint32_t called_method_index = iter->VRegB_3rc();
+          if (MethodIsReachabilitySensitive(dex_file, called_method_index)) {
+            return true;
+          }
+        }
+        break;
+      case Instruction::INVOKE_VIRTUAL_QUICK:
+      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+        {
+          uint32_t called_method_index = quicken_info.GetData(quicken_index);
+          if (MethodIsReachabilitySensitive(dex_file, called_method_index)) {
+            return true;
+          }
+        }
+        break;
+        // We explicitly do not handle indirect ReachabilitySensitive accesses through VarHandles,
+        // etc. Thus we ignore INVOKE_CUSTOM / INVOKE_CUSTOM_RANGE / INVOKE_POLYMORPHIC /
+        // INVOKE_POLYMORPHIC_RANGE.
+      default:
+        // There is no way to add an annotation to array elements, and so far we've encountered no
+        // need for that, so we ignore AGET and APUT.
+        // It's impractical or impossible to garbage collect a class while one of its methods is
+        // on the call stack. We allow ReachabilitySensitive annotations on static methods and
+        // fields, but they can be safely ignored.
+        break;
+    }
+    if (QuickenInfoTable::NeedsIndexForInstruction(&iter.Inst())) {
+      ++quicken_index;
+    }
+  }
+  return false;
+}
+
+bool HasDeadReferenceSafeAnnotation(const DexFile& dex_file,
+                                    const dex::ClassDef& class_def)
+  // TODO: This should check outer classes as well.
+  // It's conservatively correct not to do so.
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const AnnotationsDirectoryItem* annotations_dir =
+      dex_file.GetAnnotationsDirectory(class_def);
+  if (annotations_dir == nullptr) {
+    return false;
+  }
+  const AnnotationSetItem* annotation_set = dex_file.GetClassAnnotationSet(annotations_dir);
+  if (annotation_set == nullptr) {
+    return false;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set,
+      "Ldalvik/annotation/optimization/DeadReferenceSafe;", DexFile::kDexVisibilityRuntime);
+  return annotation_item != nullptr;
+}
+
 ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
                                              Handle<mirror::Class> annotation_class) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1320,17 +1516,17 @@
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
 }
 
 ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/MemberClasses;",
                           DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
@@ -1351,11 +1547,11 @@
 
 ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/EnclosingClass;",
                           DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
@@ -1378,11 +1574,11 @@
     return declaring_class;
   }
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(data.GetDexFile(),
                           annotation_set,
                           "Ldalvik/annotation/EnclosingMethod;",
@@ -1419,11 +1615,11 @@
 
 ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(data.GetDexFile(),
                           annotation_set,
                           "Ldalvik/annotation/EnclosingMethod;",
@@ -1437,11 +1633,11 @@
 
 bool GetInnerClass(Handle<mirror::Class> klass, /*out*/ ObjPtr<mirror::String>* name) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return false;
   }
-  const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
       data.GetDexFile(),
       annotation_set,
       "Ldalvik/annotation/InnerClass;",
@@ -1472,11 +1668,11 @@
 
 bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return false;
   }
-  const DexFile::AnnotationItem* annotation_item =
+  const AnnotationItem* annotation_item =
       SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/InnerClass;",
                           DexFile::kDexVisibilitySystem);
   if (annotation_item == nullptr) {
@@ -1505,7 +1701,7 @@
 ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForClass(
     Handle<mirror::Class> klass) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
@@ -1522,12 +1718,12 @@
   }
 
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return nullptr;
   }
 
-  const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
       data.GetDexFile(),
       annotation_set,
       "Ldalvik/annotation/SourceDebugExtension;",
@@ -1558,11 +1754,11 @@
 
 bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) {
   ClassData data(klass);
-  const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
     return false;
   }
-  const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+  const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       data, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
   return annotation_item != nullptr;
 }
@@ -1578,9 +1774,9 @@
   DCHECK(accessor.HasCodeItem()) << method->PrettyMethod() << " " << dex_file->GetLocation();
 
   // A method with no line number info should return -1
-  DexFile::LineNumFromPcContext context(rel_pc, -1);
-  dex_file->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), DexFile::LineNumForPcCb, &context);
-  return context.line_num_;
+  uint32_t line_num = -1;
+  accessor.GetLineNumForPc(rel_pc, &line_num);
+  return line_num;
 }
 
 template<bool kTransactionActive>
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index bde7891..018e87f 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -18,7 +18,6 @@
 #define ART_RUNTIME_DEX_DEX_FILE_ANNOTATIONS_H_
 
 #include "dex/dex_file.h"
-
 #include "handle.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object_array.h"
@@ -79,13 +78,36 @@
                                Handle<mirror::Class> annotation_class,
                                uint32_t visibility = DexFile::kDexVisibilityRuntime)
     REQUIRES_SHARED(Locks::mutator_lock_);
+
 // Check whether a method from the `dex_file` with the given `method_index`
 // is annotated with @dalvik.annotation.optimization.FastNative or
 // @dalvik.annotation.optimization.CriticalNative with build visibility.
 // If yes, return the associated access flags, i.e. kAccFastNative or kAccCriticalNative.
 uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
-                                              const DexFile::ClassDef& class_def,
+                                              const dex::ClassDef& class_def,
                                               uint32_t method_index);
+// Is the field from the `dex_file` with the given `field_index`
+// annotated with @dalvik.annotation.optimization.ReachabilitySensitive?
+bool FieldIsReachabilitySensitive(const DexFile& dex_file,
+                                  const dex::ClassDef& class_def,
+                                  uint32_t field_index);
+// Is the method from the `dex_file` with the given `method_index`
+// annotated with @dalvik.annotation.optimization.ReachabilitySensitive?
+bool MethodIsReachabilitySensitive(const DexFile& dex_file,
+                                   const dex::ClassDef& class_def,
+                                   uint32_t method_index);
+// Does the method from the `dex_file` with the given `method_index` contain an access to a field
+// annotated with @dalvik.annotation.optimization.ReachabilitySensitive, or a call to a method
+// with that annotation?
+// `class_def` is the class defining the method. We consider only accesses to fields or methods
+// declared in the static type of the corresponding object. We may overlook accesses to annotated
+// fields or methods that are in neither class_def nor a containing (outer) class.
+bool MethodContainsRSensitiveAccess(const DexFile& dex_file,
+                                    const dex::ClassDef& class_def,
+                                    uint32_t method_index);
+// Is the given class annotated with @dalvik.annotation.optimization.DeadReferenceSafe?
+bool HasDeadReferenceSafeAnnotation(const DexFile& dex_file,
+                                    const dex::ClassDef& class_def);
 
 // Class annotations.
 ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
@@ -124,7 +146,7 @@
   RuntimeEncodedStaticFieldValueIterator(Handle<mirror::DexCache> dex_cache,
                                          Handle<mirror::ClassLoader> class_loader,
                                          ClassLinker* linker,
-                                         const DexFile::ClassDef& class_def)
+                                         const dex::ClassDef& class_def)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : EncodedStaticFieldValueIterator(*dex_cache->GetDexFile(), class_def),
         dex_cache_(dex_cache),
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 2cbf557..fbcee39 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -87,7 +87,7 @@
     std::vector<std::unique_ptr<const DexFile>> multi1;
     ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
                                      GetMultiDexSrc1().c_str(),
-                                     /* verify */ true,
+                                     /* verify= */ true,
                                      kVerifyChecksum,
                                      &error_msg,
                                      &multi1)) << error_msg;
@@ -96,7 +96,7 @@
     std::vector<std::unique_ptr<const DexFile>> multi2;
     ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
                                      GetMultiDexSrc2().c_str(),
-                                     /* verify */ true,
+                                     /* verify= */ true,
                                      kVerifyChecksum,
                                      &error_msg,
                                      &multi2)) << error_msg;
diff --git a/runtime/dex_to_dex_decompiler.cc b/runtime/dex_to_dex_decompiler.cc
index a5248e6..d078d6f 100644
--- a/runtime/dex_to_dex_decompiler.cc
+++ b/runtime/dex_to_dex_decompiler.cc
@@ -32,7 +32,7 @@
 class DexDecompiler {
  public:
   DexDecompiler(const DexFile& dex_file,
-                const DexFile::CodeItem& code_item,
+                const dex::CodeItem& code_item,
                 const ArrayRef<const uint8_t>& quickened_info,
                 bool decompile_return_instruction)
     : code_item_accessor_(dex_file, &code_item),
@@ -187,7 +187,6 @@
       LOG(FATAL) << "Failed to use all values in quickening info."
                  << " Actual: " << std::hex << quicken_index_
                  << " Expected: " << quicken_info_.NumIndices();
-      return false;
     }
   }
 
@@ -195,7 +194,7 @@
 }
 
 bool ArtDecompileDEX(const DexFile& dex_file,
-                     const DexFile::CodeItem& code_item,
+                     const dex::CodeItem& code_item,
                      const ArrayRef<const uint8_t>& quickened_info,
                      bool decompile_return_instruction) {
   if (quickened_info.size() == 0 && !decompile_return_instruction) {
diff --git a/runtime/dex_to_dex_decompiler.h b/runtime/dex_to_dex_decompiler.h
index 93711d1..4b6b0f7 100644
--- a/runtime/dex_to_dex_decompiler.h
+++ b/runtime/dex_to_dex_decompiler.h
@@ -18,9 +18,15 @@
 #define ART_RUNTIME_DEX_TO_DEX_DECOMPILER_H_
 
 #include "base/array_ref.h"
-#include "dex/dex_file.h"
 
 namespace art {
+
+class DexFile;
+
+namespace dex {
+struct CodeItem;
+}  // namespace dex
+
 namespace optimizer {
 
 // "Decompile", that is unquicken, the code item provided, given the
@@ -30,7 +36,7 @@
 // consistent with DexToDexCompiler, but we should really change it to
 // DexFile::CodeItem*.
 bool ArtDecompileDEX(const DexFile& dex_file,
-                     const DexFile::CodeItem& code_item,
+                     const dex::CodeItem& code_item,
                      const ArrayRef<const uint8_t>& quickened_data,
                      bool decompile_return_instruction);
 
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 556ff69..9c0ac8f 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -39,8 +39,6 @@
 
 void DexoptTest::PreRuntimeCreate() {
   std::string error_msg;
-  ASSERT_TRUE(PreRelocateImage(GetImageLocation(), &error_msg)) << error_msg;
-  ASSERT_TRUE(PreRelocateImage(GetImageLocation2(), &error_msg)) << error_msg;
   UnreserveImageSpace();
 }
 
@@ -48,27 +46,14 @@
   ReserveImageSpace();
 }
 
-static std::string ImageLocation() {
-  Runtime* runtime = Runtime::Current();
-  const std::vector<gc::space::ImageSpace*>& image_spaces =
-      runtime->GetHeap()->GetBootImageSpaces();
-  if (image_spaces.empty()) {
-    return "";
-  }
-  return image_spaces[0]->GetImageLocation();
-}
-
 bool DexoptTest::Dex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
-  Runtime* runtime = Runtime::Current();
-
   std::vector<std::string> argv;
-  argv.push_back(runtime->GetCompilerExecutable());
-  if (runtime->IsJavaDebuggable()) {
-    argv.push_back("--debuggable");
+  if (!CommonRuntimeTest::StartDex2OatCommandLine(&argv, error_msg)) {
+    return false;
   }
-  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
 
-  if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kDisabled) {
     argv.push_back("--runtime-arg");
     argv.push_back("-Xhidden-api-checks");
   }
@@ -77,11 +62,6 @@
     argv.push_back("--host");
   }
 
-  argv.push_back("--boot-image=" + ImageLocation());
-
-  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-  argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
   argv.insert(argv.end(), args.begin(), args.end());
 
   std::string command_line(android::base::Join(argv, ' '));
@@ -92,7 +72,8 @@
                                     const std::string& oat_location,
                                     CompilerFilter::Filter filter,
                                     bool with_alternate_image,
-                                    const char* compilation_reason) {
+                                    const char* compilation_reason,
+                                    const std::vector<std::string>& extra_args) {
   std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
   std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
 
@@ -121,35 +102,36 @@
     args.push_back("--compilation-reason=" + std::string(compilation_reason));
   }
 
+  args.insert(args.end(), extra_args.begin(), extra_args.end());
+
   std::string error_msg;
   ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
 
   // Verify the odex file was generated as expected.
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_location.c_str(),
                                                    oat_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
   EXPECT_EQ(filter, odex_file->GetCompilerFilter());
 
-  std::unique_ptr<ImageHeader> image_header(
-          gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
-                                                 kRuntimeISA,
-                                                 &error_msg));
-  ASSERT_TRUE(image_header != nullptr) << error_msg;
+  std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+      Runtime::Current()->GetBootClassPath(), image_location, kRuntimeISA, &error_msg);
+  ASSERT_FALSE(boot_image_checksums.empty()) << error_msg;
+
   const OatHeader& oat_header = odex_file->GetOatHeader();
-  uint32_t combined_checksum = image_header->GetOatChecksum();
 
   if (CompilerFilter::DependsOnImageChecksum(filter)) {
+    const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+    ASSERT_TRUE(checksums != nullptr);
     if (with_alternate_image) {
-      EXPECT_NE(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
+      EXPECT_NE(boot_image_checksums, checksums);
     } else {
-      EXPECT_EQ(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
+      EXPECT_EQ(boot_image_checksums, checksums);
     }
   }
 }
@@ -157,12 +139,14 @@
 void DexoptTest::GenerateOdexForTest(const std::string& dex_location,
                                      const std::string& odex_location,
                                      CompilerFilter::Filter filter,
-                                     const char* compilation_reason) {
+                                     const char* compilation_reason,
+                                     const std::vector<std::string>& extra_args) {
   GenerateOatForTest(dex_location,
                      odex_location,
                      filter,
-                     /* with_alternate_image */ false,
-                     compilation_reason);
+                     /*with_alternate_image=*/ false,
+                     compilation_reason,
+                     extra_args);
 }
 
 void DexoptTest::GenerateOatForTest(const char* dex_location,
@@ -179,35 +163,7 @@
 }
 
 void DexoptTest::GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter) {
-  GenerateOatForTest(dex_location, filter, /* with_alternate_image */ false);
-}
-
-bool DexoptTest::PreRelocateImage(const std::string& image_location, std::string* error_msg) {
-  std::string dalvik_cache;
-  bool have_android_data;
-  bool dalvik_cache_exists;
-  bool is_global_cache;
-  GetDalvikCache(GetInstructionSetString(kRuntimeISA),
-                 /* create_if_absent */ true,
-                 &dalvik_cache,
-                 &have_android_data,
-                 &dalvik_cache_exists,
-                 &is_global_cache);
-  if (!dalvik_cache_exists) {
-    *error_msg = "Failed to create dalvik cache";
-    return false;
-  }
-
-  std::string patchoat = GetAndroidRoot();
-  patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
-
-  std::vector<std::string> argv;
-  argv.push_back(patchoat);
-  argv.push_back("--input-image-location=" + image_location);
-  argv.push_back("--output-image-directory=" + dalvik_cache);
-  argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
-  argv.push_back("--base-offset-delta=0x00008000");
-  return Exec(argv, error_msg);
+  GenerateOatForTest(dex_location, filter, /*with_alternate_image=*/ false);
 }
 
 void DexoptTest::ReserveImageSpace() {
@@ -237,7 +193,9 @@
                                                       reinterpret_cast<uint8_t*>(start),
                                                       end - start,
                                                       PROT_NONE,
-                                                      /* low_4gb*/ false,
+                                                      /*low_4gb=*/ false,
+                                                      /*reuse=*/ false,
+                                                      /*reservation=*/ nullptr,
                                                       &error_msg));
     ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
     LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 067b67a..026fe55 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -42,13 +42,15 @@
                           const std::string& oat_location,
                           CompilerFilter::Filter filter,
                           bool with_alternate_image,
-                          const char* compilation_reason = nullptr);
+                          const char* compilation_reason = nullptr,
+                          const std::vector<std::string>& extra_args = {});
 
   // Generate an odex file for the purposes of test.
   void GenerateOdexForTest(const std::string& dex_location,
                            const std::string& odex_location,
                            CompilerFilter::Filter filter,
-                           const char* compilation_reason = nullptr);
+                           const char* compilation_reason = nullptr,
+                          const std::vector<std::string>& extra_args = {});
 
   // Generate an oat file for the given dex location in its oat location (under
   // the dalvik cache).
@@ -62,11 +64,6 @@
   static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
 
  private:
-  // Pre-Relocate the image to a known non-zero offset so we don't have to
-  // deal with the runtime randomly relocating the image by 0 and messing up
-  // the expected results of the tests.
-  bool PreRelocateImage(const std::string& image_location, std::string* error_msg);
-
   // Reserve memory around where the image will be loaded so other memory
   // won't conflict when it comes time to load the image.
   // This can be called with an already loaded image to reserve the space
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e7715c4..4e5fe5f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -86,7 +86,7 @@
                                                    bool low_4gb,
                                                    std::string* error_msg) {
   std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
-      new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
+      new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only= */ false));
   if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
     return nullptr;
   }
@@ -1163,7 +1163,7 @@
           vaddr_size,
           PROT_NONE,
           low_4gb,
-          /* reuse */ false,
+          /* reuse= */ false,
           reservation,
           error_msg);
       if (!local_reservation.IsValid()) {
@@ -1237,10 +1237,10 @@
                                    flags,
                                    file->Fd(),
                                    program_header->p_offset,
-                                   /* low4_gb */ false,
+                                   /* low_4gb= */ false,
                                    file->GetPath().c_str(),
-                                   /* reuse */ true,  // implies MAP_FIXED
-                                   /* reservation */ nullptr,
+                                   /* reuse= */ true,  // implies MAP_FIXED
+                                   /* reservation= */ nullptr,
                                    error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
@@ -1262,9 +1262,9 @@
                                             p_vaddr + program_header->p_filesz,
                                             program_header->p_memsz - program_header->p_filesz,
                                             prot,
-                                            /* low_4gb */ false,
-                                            /* reuse */ true,
-                                            /* reservation */ nullptr,
+                                            /* low_4gb= */ false,
+                                            /* reuse= */ true,
+                                            /* reservation= */ nullptr,
                                             error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
@@ -1417,7 +1417,7 @@
 void ElfFileImpl<ElfTypes>::ApplyOatPatches(
     const uint8_t* patches, const uint8_t* patches_end, Elf_Addr delta,
     uint8_t* to_patch, const uint8_t* to_patch_end) {
-  typedef __attribute__((__aligned__(1))) Elf_Addr UnalignedAddress;
+  using UnalignedAddress __attribute__((__aligned__(1))) = Elf_Addr;
   while (patches < patches_end) {
     to_patch += DecodeUnsignedLeb128(&patches);
     DCHECK_LE(patches, patches_end) << "Unexpected end of patch list.";
@@ -1763,7 +1763,7 @@
                                PROT_READ,
                                MAP_PRIVATE,
                                file->Fd(),
-                               /* start */ 0,
+                               /* start= */ 0,
                                low_4gb,
                                file->GetPath().c_str(),
                                error_msg);
@@ -1886,7 +1886,7 @@
 }
 
 bool ElfFile::Strip(File* file, std::string* error_msg) {
-  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg));
+  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb=*/false, error_msg));
   if (elf_file.get() == nullptr) {
     return false;
   }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 1045d2a..a18cca4 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -22,6 +22,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/sdk_version.h"
 #include "class_linker-inl.h"
 #include "common_throws.h"
 #include "dex/dex_file.h"
@@ -32,7 +33,8 @@
 #include "imtable-inl.h"
 #include "indirect_reference_table.h"
 #include "jni/jni_internal.h"
-#include "mirror/array.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/throwable.h"
@@ -93,15 +95,18 @@
       // even going back from boot image methods to the same oat file. However, this is
       // not currently implemented in the compiler. Therefore crossing dex file boundary
       // indicates that the inlined definition is not the same as the one used at runtime.
-      LOG(FATAL) << "Inlined method resolution crossed dex file boundary: from "
-                 << method->PrettyMethod()
-                 << " in " << method->GetDexFile()->GetLocation() << "/"
-                 << static_cast<const void*>(method->GetDexFile())
-                 << " to " << inlined_method->PrettyMethod()
-                 << " in " << inlined_method->GetDexFile()->GetLocation() << "/"
-                 << static_cast<const void*>(inlined_method->GetDexFile()) << ". "
-                 << "This must be due to duplicate classes or playing wrongly with class loaders";
-      UNREACHABLE();
+      bool target_sdk_at_least_p =
+          IsSdkVersionSetAndAtLeast(Runtime::Current()->GetTargetSdkVersion(), SdkVersion::kP);
+      LOG(target_sdk_at_least_p ? FATAL : WARNING)
+          << "Inlined method resolution crossed dex file boundary: from "
+          << method->PrettyMethod()
+          << " in " << method->GetDexFile()->GetLocation() << "/"
+          << static_cast<const void*>(method->GetDexFile())
+          << " to " << inlined_method->PrettyMethod()
+          << " in " << inlined_method->GetDexFile()->GetLocation() << "/"
+          << static_cast<const void*>(inlined_method->GetDexFile()) << ". "
+          << "This must be due to duplicate classes or playing wrongly with class loaders. "
+          << "The runtime is in an unsafe state.";
     }
     method = inlined_method;
   }
@@ -189,7 +194,7 @@
       return nullptr;
     }
     // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
-    return klass->Alloc</*kInstrumented*/true>(
+    return klass->Alloc</*kInstrumented=*/true>(
         self,
         Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
   }
@@ -214,7 +219,7 @@
     // Pass in false since the object cannot be finalizable.
     // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
     // instrumented.
-    return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
+    return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
   }
   // Pass in false since the object cannot be finalizable.
   return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
@@ -285,11 +290,11 @@
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
-    return mirror::Array::Alloc</*kInstrumented*/true>(self,
-                                                       klass,
-                                                       component_count,
-                                                       klass->GetComponentSizeShift(),
-                                                       heap->GetCurrentAllocator());
+    return mirror::Array::Alloc</*kInstrumented=*/true>(self,
+                                                        klass,
+                                                        component_count,
+                                                        klass->GetComponentSizeShift(),
+                                                        heap->GetCurrentAllocator());
   }
   return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
                                              klass->GetComponentSizeShift(), allocator_type);
@@ -420,28 +425,17 @@
 #undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
 #undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
 
+// Follow virtual/interface indirections if applicable.
+// Will throw a null-pointer exception if the object is null.
 template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                     ObjPtr<mirror::Object>* this_object,
-                                     ArtMethod* referrer,
-                                     Thread* self) {
+ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
+                                          ArtMethod* resolved_method,
+                                          ObjPtr<mirror::Object>* this_object,
+                                          ArtMethod* referrer,
+                                          Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-  constexpr ClassLinker::ResolveMode resolve_mode =
-      access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
-                   : ClassLinker::ResolveMode::kNoChecks;
-  ArtMethod* resolved_method;
-  if (type == kStatic) {
-    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
-  } else {
-    StackHandleScope<1> hs(self);
-    HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
-    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
-  }
-  if (UNLIKELY(resolved_method == nullptr)) {
-    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
-    return nullptr;  // Failure.
-  }
-  // Next, null pointer check.
+  // Null pointer check.
   if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
     if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
                  resolved_method->IsConstructor())) {
@@ -539,7 +533,7 @@
       UNREACHABLE();
     }
     case kInterface: {
-      uint32_t imt_index = ImTable::GetImtIndex(resolved_method);
+      size_t imt_index = resolved_method->GetImtIndex();
       PointerSize pointer_size = class_linker->GetImagePointerSize();
       ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
       ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
@@ -570,6 +564,31 @@
   }
 }
 
+template<InvokeType type, bool access_check>
+inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
+                                     ObjPtr<mirror::Object>* this_object,
+                                     ArtMethod* referrer,
+                                     Thread* self) {
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  constexpr ClassLinker::ResolveMode resolve_mode =
+      access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+                   : ClassLinker::ResolveMode::kNoChecks;
+  ArtMethod* resolved_method;
+  if (type == kStatic) {
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+  } else {
+    StackHandleScope<1> hs(self);
+    HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+  }
+  if (UNLIKELY(resolved_method == nullptr)) {
+    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
+    return nullptr;  // Failure.
+  }
+  return FindMethodToCall<type, access_check>(
+      method_idx, resolved_method, this_object, referrer, self);
+}
+
 // Explicit template declarations of FindMethodFromCode for all invoke types.
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                 \
   template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE                       \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5421f69..19498f3 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -20,6 +20,7 @@
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/mutex.h"
+#include "base/sdk_version.h"
 #include "class_linker-inl.h"
 #include "dex/dex_file-inl.h"
 #include "entrypoints/entrypoint_utils-inl.h"
@@ -64,9 +65,9 @@
   soa.Self()->AssertThreadSuspensionIsAllowable();
   jobjectArray args_jobj = nullptr;
   const JValue zero;
-  int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
+  uint32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
   // Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
-  if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) {
+  if (args.size() > 0 || IsSdkVersionSetAndAtMost(target_sdk_version, SdkVersion::kL)) {
     args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
     if (args_jobj == nullptr) {
       CHECK(soa.Self()->IsExceptionPending());
@@ -244,7 +245,7 @@
   result.outer_method = outer_caller_and_pc.first;
   uintptr_t caller_pc = outer_caller_and_pc.second;
   result.caller =
-      DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+      DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true);
   return result;
 }
 
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c8bf6d0..e10a6e8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -21,8 +21,8 @@
 #include <stdint.h>
 
 #include "base/callee_save_type.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file_types.h"
 #include "dex/dex_instruction.h"
 #include "gc/allocator_type.h"
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 1e30907..e555d68 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -20,7 +20,7 @@
 #include "arch/instruction_set.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "quick/quick_method_frame_info.h"
 #include "thread-inl.h"
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 257cd41..abefa4a 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -25,7 +25,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
-#include "mirror/string-inl.h"
+#include "mirror/string-alloc-inl.h"
 
 namespace art {
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index bd1e295..937ba8e 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc/allocator_type.h"
 #include "quick_entrypoints.h"
 
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 8e784c1..ce12fde 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -31,7 +31,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
+  ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true);
 
   // Resolution and initialization
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index c782c9c..d06dbcb 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -16,7 +16,6 @@
 
 #include "base/logging.h"  // For VLOG_IS_ON.
 #include "base/mutex.h"
-#include "base/systrace.h"
 #include "callee_save_frame.h"
 #include "interpreter/interpreter.h"
 #include "obj_ptr-inl.h"  // TODO: Find the other include that isn't complete, and clean this up.
@@ -41,13 +40,10 @@
 
   self->AssertHasDeoptimizationContext();
   QuickExceptionHandler exception_handler(self, true);
-  {
-    ScopedTrace trace(std::string("Deoptimization ") + GetDeoptimizationKindName(kind));
-    if (single_frame) {
-      exception_handler.DeoptimizeSingleFrame(kind);
-    } else {
-      exception_handler.DeoptimizeStack();
-    }
+  if (single_frame) {
+    exception_handler.DeoptimizeSingleFrame(kind);
+  } else {
+    exception_handler.DeoptimizeStack();
   }
   uintptr_t return_pc = exception_handler.UpdateInstrumentationStack();
   if (exception_handler.IsFullFragmentDone()) {
@@ -74,9 +70,9 @@
   JValue return_value;
   return_value.SetJ(0);  // we never deoptimize from compiled code with an invoke result.
   self->PushDeoptimizationContext(return_value,
-                                  false /* is_reference */,
+                                  /* is_reference= */ false,
                                   self->GetException(),
-                                  true /* from_code */,
+                                  /* from_code= */ true,
                                   DeoptimizationMethodType::kDefault);
   artDeoptimizeImpl(self, kind, true);
 }
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c4d85a3..e939982 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -140,7 +140,7 @@
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_klass = hs.NewHandle(klass);
   bool success = class_linker->EnsureInitialized(
-      self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+      self, h_klass, /* can_init_fields= */ true, /* can_init_parents= */ true);
   if (UNLIKELY(!success)) {
     return nullptr;
   }
@@ -157,8 +157,8 @@
   ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                                                         caller,
                                                         self,
-                                                        /* can_run_clinit */ false,
-                                                        /* verify_access */ false);
+                                                        /* can_run_clinit= */ false,
+                                                        /* verify_access= */ false);
   if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
     StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
   }
@@ -175,8 +175,8 @@
   ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                                                         caller,
                                                         self,
-                                                        /* can_run_clinit */ false,
-                                                        /* verify_access */ true);
+                                                        /* can_run_clinit= */ false,
+                                                        /* verify_access= */ true);
   // Do not StoreTypeInBss(); access check entrypoint is never used together with .bss.
   return result.Ptr();
 }
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 795faa8..243f7ec 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "offsets.h"
 
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d38e3ed..56232c5 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -392,7 +392,7 @@
   constexpr ReadBarrierOption kReadBarrierOption =
       kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
   mirror::Object* result =
-      ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>(
+      ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
         obj,
         MemberOffset(offset),
         ref_addr);
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 3c41a8c..5c86bbb 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -252,7 +252,7 @@
         return 0;
       default:
         LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
-        return 0;
+        UNREACHABLE();
     }
   }
 }
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ba7fb6b..2e447ec 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -67,7 +67,7 @@
   ScopedQuickEntrypointChecks sqec(self);
   // We come from an explicit check in the generated code. This path is triggered
   // only if the object is indeed null.
-  ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U);
+  ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
   self->QuickDeliverException();
 }
 
@@ -75,7 +75,7 @@
 extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
+  ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
   self->QuickDeliverException();
 }
 
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index fccfce4..6deb509 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -297,7 +297,7 @@
       case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
       default:
       LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
-      return 0;
+      UNREACHABLE();
     }
   }
 #else
@@ -753,6 +753,7 @@
   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
 
   JValue result;
+  bool force_frame_pop = false;
 
   if (UNLIKELY(deopt_frame != nullptr)) {
     HandleDeoptimization(&result, method, deopt_frame, &fragment);
@@ -762,7 +763,7 @@
     uint16_t num_regs = accessor.RegistersSize();
     // No last shadow coming from quick.
     ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
+        CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
     ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
     size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -788,6 +789,7 @@
     }
 
     result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
+    force_frame_pop = shadow_frame->GetForcePopFrame();
   }
 
   // Pop transition.
@@ -804,13 +806,21 @@
       LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                    << caller->PrettyMethod();
     } else {
+      VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
+                  << " to " << caller->PrettyMethod()
+                  << (force_frame_pop ? " for frame-pop" : "");
+      DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
+      if (force_frame_pop && self->GetException() != nullptr) {
+        LOG(WARNING) << "Suppressing exception for instruction-retry: "
+                     << self->GetException()->Dump();
+      }
       // Push the context of the deoptimization stack so we can restore the return value and the
       // exception before executing the deoptimized frames.
       self->PushDeoptimizationContext(
           result,
           shorty[0] == 'L' || shorty[0] == '[',  /* class or array */
-          self->GetException(),
-          false /* from_code */,
+          force_frame_pop ? nullptr : self->GetException(),
+          /* from_code= */ false,
           DeoptimizationMethodType::kDefault);
 
       // Set special exception to cause deoptimization.
@@ -902,7 +912,7 @@
   uint32_t shorty_len = 0;
   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
   BuildQuickArgumentVisitor local_ref_visitor(
-      sp, /* is_static */ false, shorty, shorty_len, &soa, &args);
+      sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
 
   local_ref_visitor.VisitArguments();
   DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
@@ -965,7 +975,7 @@
                                      const char* shorty,
                                      uint32_t shorty_len,
                                      size_t arg_pos)
-      : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len),
+      : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
         cur_pos_(0u),
         arg_pos_(arg_pos),
         ref_arg_(nullptr) {
@@ -1051,7 +1061,7 @@
       << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
   uint32_t shorty_len = 0;
   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
-  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len);
+  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
   ref_args_visitor.VisitArguments();
   std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
   return ref_args;
@@ -2505,7 +2515,7 @@
   ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
   ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method);
   if (UNLIKELY(method == nullptr)) {
-    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+    const DexFile* dex_file = caller_method->GetDexFile();
     uint32_t shorty_len;
     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
     {
@@ -2638,7 +2648,7 @@
       dex_method_idx = instr.VRegB_3rc();
     }
 
-    const DexFile& dex_file = caller_method->GetDeclaringClass()->GetDexFile();
+    const DexFile& dex_file = *caller_method->GetDexFile();
     uint32_t shorty_len;
     const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx),
                                                   &shorty_len);
@@ -2661,7 +2671,7 @@
 
   DCHECK(!interface_method->IsRuntimeMethod());
   // Look whether we have a match in the ImtConflictTable.
-  uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+  uint32_t imt_index = interface_method->GetImtIndex();
   ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
   if (LIKELY(conflict_method->IsRuntimeMethod())) {
     ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
@@ -2699,7 +2709,7 @@
       conflict_method,
       interface_method,
       method,
-      /*force_new_conflict_method*/false);
+      /*force_new_conflict_method=*/false);
   if (new_conflict_method != conflict_method) {
     // Update the IMT if we create a new conflict method. No fence needed here, as the
     // data is consistent.
@@ -2774,7 +2784,7 @@
   const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
   const size_t first_arg = 0;
   ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-      CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+      CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
   ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
   ScopedStackedShadowFramePusher
       frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -2867,7 +2877,7 @@
   const size_t first_arg = 0;
   const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
   ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-      CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc);
+      CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
   ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
   ScopedStackedShadowFramePusher
       frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cb85804..040a8c5 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#include <setjmp.h>
-
 #include <memory>
 
 #include "base/macros.h"
@@ -127,9 +125,7 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, jni_entrypoints, sizeof(size_t));
 
     // Skip across the entrypoints structures.
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, rosalloc_runs, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
                         sizeof(void*) * kNumRosAllocThreadLocalSizeBracketsInThread);
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
@@ -140,8 +136,11 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
-    EXPECT_OFFSET_DIFF(Thread, tlsPtr_.async_exception, Thread, wait_mutex_, sizeof(void*),
-                       thread_tlsptr_end);
+    // The first field after tlsPtr_ is forced to a 16 byte alignment so it might have some space.
+    auto offset_tlsptr_end = OFFSETOF_MEMBER(Thread, tlsPtr_) +
+        sizeof(decltype(reinterpret_cast<Thread*>(16)->tlsPtr_));
+    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.async_exception) == sizeof(void*),
+            "async_exception last field");
   }
 
   void CheckJniEntryPoints() {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 1ab0b0e..5c2830d 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -16,7 +16,6 @@
 
 #include "fault_handler.h"
 
-#include <setjmp.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <sys/ucontext.h>
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 02eeefe..f6cf2d7 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -18,13 +18,13 @@
 #ifndef ART_RUNTIME_FAULT_HANDLER_H_
 #define ART_RUNTIME_FAULT_HANDLER_H_
 
-#include <setjmp.h>
 #include <signal.h>
 #include <stdint.h>
 
 #include <vector>
 
-#include "base/mutex.h"  // For annotalysis.
+#include "base/locks.h"  // For annotalysis.
+#include "runtime_globals.h"  // For CanDoImplicitNullCheckOn.
 
 namespace art {
 
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 10af10d..9431f80 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,9 @@
   void Init() {
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous(name_.c_str(),
-                                    /* addr */ nullptr,
                                     capacity_ * sizeof(begin_[0]),
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bb2beaa..8a15af2 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,9 @@
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index ffef566..68f2d04 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -23,9 +23,9 @@
 #include <set>
 #include <vector>
 
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
+#include "runtime_globals.h"
 
 namespace art {
 
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 1e7d76c..df50682 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -127,14 +127,6 @@
   return cards_scanned;
 }
 
-/*
- * Visitor is expected to take in a card and return the new value. When a value is modified, the
- * modify visitor is called.
- * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
- * value.
- * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
- * us to know which cards got cleared.
- */
 template <typename Visitor, typename ModifiedVisitor>
 inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
                                          uint8_t* scan_end,
@@ -144,6 +136,7 @@
   uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
+  DCHECK(visitor(kCardClean) == kCardClean);
 
   // Handle any unaligned cards at the start.
   while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
@@ -188,7 +181,8 @@
   while (word_cur < word_end) {
     while (true) {
       expected_word = *word_cur;
-      if (LIKELY(expected_word == 0)) {
+      static_assert(kCardClean == 0);
+      if (LIKELY(expected_word == 0 /* All kCardClean */ )) {
         break;
       }
       for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7cddec6..fdf1615 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,9 @@
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("card table",
-                                        /* addr */ nullptr,
                                         capacity + 256,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index f163898..30c4386 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -19,9 +19,9 @@
 
 #include <memory>
 
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
+#include "runtime_globals.h"
 
 namespace art {
 
@@ -95,12 +95,10 @@
   }
 
   /*
-   * Visitor is expected to take in a card and return the new value. When a value is modified, the
-   * modify visitor is called.
-   * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
-   * value.
-   * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
-   * us to know which cards got cleared.
+   * Modify cards in the range from scan_begin (inclusive) to scan_end (exclusive). Each card
+   * value v is replaced by visitor(v). Visitor() should not have side-effects.
+   * Whenever a card value is changed, modified(card_address, old_value, new_value) is invoked.
+   * For opportunistic performance reasons, this assumes that visitor(kCardClean) is kCardClean!
    */
   template <typename Visitor, typename ModifiedVisitor>
   void ModifyCardsAtomic(uint8_t* scan_begin,
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 87965ed..12baaa4 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -60,7 +60,7 @@
   uint8_t* HeapLimit() const {
     return HeapBegin() + heap_size_;
   }
-  // Return a pseudo random card for an address.
+  // Return a non-zero pseudo random card for an address.
   uint8_t PseudoRandomCard(const uint8_t* addr) const {
     size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
     return 1 + offset % 254;
@@ -97,7 +97,8 @@
 class UpdateVisitor {
  public:
   uint8_t operator()(uint8_t c) const {
-    return c * 93 + 123;
+    // Must map zero to zero. Never applied to zero.
+    return c == 0 ? 0 : c * 93 + 123;
   }
   void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
   }
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index c997f8d..e477556 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/allocator.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "space_bitmap.h"
 
 namespace art {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 40dc6e1..b4026fc 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -462,7 +462,7 @@
     for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
       if (obj_ptr->AsMirrorPtr() != nullptr) {
         all_null = false;
-        visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
+        visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false);
       }
     }
     count += references.size();
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 8c471bc..011e95c 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -18,12 +18,12 @@
 #define ART_RUNTIME_GC_ACCOUNTING_MOD_UNION_TABLE_H_
 
 #include "base/allocator.h"
-#include "base/globals.h"
 #include "base/safe_map.h"
 #include "base/tracking_safe_map.h"
 #include "bitmap.h"
 #include "card_table.h"
 #include "mirror/object_reference.h"
+#include "runtime_globals.h"
 
 #include <set>
 #include <vector>
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 2a382d7..e66a174 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -20,6 +20,7 @@
 #include "class_root.h"
 #include "common_runtime_test.h"
 #include "gc/space/space-inl.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/array-inl.h"
 #include "space_bitmap-inl.h"
 #include "thread-current-inl.h"
@@ -161,9 +162,9 @@
     }
     default: {
       UNIMPLEMENTED(FATAL) << "Invalid type " << type;
+      UNREACHABLE();
     }
   }
-  return nullptr;
 }
 
 TEST_F(ModUnionTableTest, TestCardCache) {
@@ -184,7 +185,7 @@
   ResetClass();
   // Create another space that we can put references in.
   std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
-      "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+      "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false));
   ASSERT_TRUE(other_space.get() != nullptr);
   {
     ScopedThreadSuspension sts(self, kSuspended);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 8bdf6da..44cdb5e 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -20,10 +20,10 @@
 #include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 
 #include "base/bit_utils.h"
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace gc {
@@ -40,10 +40,9 @@
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous("read barrier table",
-                                    /* addr */ nullptr,
                                     capacity,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 9dea2f8..fba62c3 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -75,7 +75,7 @@
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
       *contains_reference_to_target_space_ = true;
-      collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
+      collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false);
       DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
     }
   }
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index b96f0d3..3525667 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -18,9 +18,9 @@
 #define ART_RUNTIME_GC_ACCOUNTING_REMEMBERED_SET_H_
 
 #include "base/allocator.h"
-#include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
+#include "runtime_globals.h"
 
 #include <set>
 #include <vector>
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2946486..dc223db 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,9 @@
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 6a3faef..6ca254a 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -23,9 +23,9 @@
 #include <set>
 #include <vector>
 
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
+#include "runtime_globals.h"
 
 namespace art {
 
@@ -151,6 +151,12 @@
   void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
       NO_THREAD_SAFETY_ANALYSIS;
 
+  // Visit all of the set bits in HeapBegin(), HeapLimit().
+  template <typename Visitor>
+  void VisitAllMarked(Visitor&& visitor) const {
+    VisitMarkedRange(HeapBegin(), HeapLimit(), visitor);
+  }
+
   // Visits set bits in address order.  The callback is not permitted to change the bitmap bits or
   // max during the traversal.
   template <typename Visitor>
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 22529b8..9f355e3 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -19,9 +19,9 @@
 #include <stdint.h>
 #include <memory>
 
-#include "base/globals.h"
 #include "base/mutex.h"
 #include "common_runtime_test.h"
+#include "runtime_globals.h"
 #include "space_bitmap-inl.h"
 
 namespace art {
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 7675a22..a578252 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -20,8 +20,8 @@
 #include <list>
 #include <memory>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index b9c1dc6..80e3394 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -24,9 +24,7 @@
 #include "object_callbacks.h"
 #include "stack.h"
 
-#ifdef ART_TARGET_ANDROID
-#include "cutils/properties.h"
-#endif
+#include <android-base/properties.h>
 
 namespace art {
 namespace gc {
@@ -45,10 +43,10 @@
 #ifdef ART_TARGET_ANDROID
   // Check whether there's a system property overriding the max number of records.
   const char* propertyName = "dalvik.vm.allocTrackerMax";
-  char allocMaxString[PROPERTY_VALUE_MAX];
-  if (property_get(propertyName, allocMaxString, "") > 0) {
+  std::string allocMaxString = android::base::GetProperty(propertyName, "");
+  if (!allocMaxString.empty()) {
     char* end;
-    size_t value = strtoul(allocMaxString, &end, 10);
+    size_t value = strtoul(allocMaxString.c_str(), &end, 10);
     if (*end != '\0') {
       LOG(ERROR) << "Ignoring  " << propertyName << " '" << allocMaxString
                  << "' --- invalid";
@@ -61,10 +59,10 @@
   }
   // Check whether there's a system property overriding the number of recent records.
   propertyName = "dalvik.vm.recentAllocMax";
-  char recentAllocMaxString[PROPERTY_VALUE_MAX];
-  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
+  std::string recentAllocMaxString = android::base::GetProperty(propertyName, "");
+  if (!recentAllocMaxString.empty()) {
     char* end;
-    size_t value = strtoul(recentAllocMaxString, &end, 10);
+    size_t value = strtoul(recentAllocMaxString.c_str(), &end, 10);
     if (*end != '\0') {
       LOG(ERROR) << "Ignoring  " << propertyName << " '" << recentAllocMaxString
                  << "' --- invalid";
@@ -77,10 +75,10 @@
   }
   // Check whether there's a system property overriding the max depth of stack trace.
   propertyName = "debug.allocTracker.stackDepth";
-  char stackDepthString[PROPERTY_VALUE_MAX];
-  if (property_get(propertyName, stackDepthString, "") > 0) {
+  std::string stackDepthString = android::base::GetProperty(propertyName, "");
+  if (!stackDepthString.empty()) {
     char* end;
-    size_t value = strtoul(stackDepthString, &end, 10);
+    size_t value = strtoul(stackDepthString.c_str(), &end, 10);
     if (*end != '\0') {
       LOG(ERROR) << "Ignoring  " << propertyName << " '" << stackDepthString
                  << "' --- invalid";
@@ -186,34 +184,6 @@
   new_record_condition_.Broadcast(Thread::Current());
 }
 
-class AllocRecordStackVisitor : public StackVisitor {
- public:
-  AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        max_depth_(max_depth),
-        trace_(trace_out) {}
-
-  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-  // annotalysis.
-  bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
-    if (trace_->GetDepth() >= max_depth_) {
-      return false;
-    }
-    ArtMethod* m = GetMethod();
-    // m may be null if we have inlined methods of unresolved classes. b/27858645
-    if (m != nullptr && !m->IsRuntimeMethod()) {
-      m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-      trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
-    }
-    return true;
-  }
-
- private:
-  const size_t max_depth_;
-  AllocRecordStackTrace* const trace_;
-};
-
 void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
   Thread* self = Thread::Current();
   Heap* heap = Runtime::Current()->GetHeap();
@@ -270,11 +240,26 @@
   // Get stack trace outside of lock in case there are allocations during the stack walk.
   // b/27858645.
   AllocRecordStackTrace trace;
-  AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
   {
     StackHandleScope<1> hs(self);
     auto obj_wrapper = hs.NewHandleWrapper(obj);
-    visitor.WalkStack();
+
+    StackVisitor::WalkStack(
+        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+          if (trace.GetDepth() >= max_stack_depth_) {
+            return false;
+          }
+          ArtMethod* m = stack_visitor->GetMethod();
+          // m may be null if we have inlined methods of unresolved classes. b/27858645
+          if (m != nullptr && !m->IsRuntimeMethod()) {
+            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
+          }
+          return true;
+        },
+        self,
+        /* context= */ nullptr,
+        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
   }
 
   MutexLock mu(self, *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index e508d5f..79d4fbf 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -38,6 +38,7 @@
 #pragma GCC diagnostic ignored "-Wempty-body"
 #pragma GCC diagnostic ignored "-Wstrict-aliasing"
 #pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
+#pragma GCC diagnostic ignored "-Wexpansion-to-defined"
 #include "../../../external/dlmalloc/malloc.c"
 // Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
 //       of libbase, so undefine it now.
@@ -59,8 +60,8 @@
 
 #include <sys/mman.h>
 
-#include "base/globals.h"
 #include "base/utils.h"
+#include "runtime_globals.h"
 
 extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
   // Is this chunk in use?
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0dbafde..d4b9fab 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "rosalloc.h"
+#include "rosalloc-inl.h"
 
 #include <list>
 #include <map>
@@ -92,10 +92,9 @@
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
   page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
-                                           /* addr */ nullptr,
                                            RoundUp(max_num_of_pages, kPageSize),
                                            PROT_READ | PROT_WRITE,
-                                           /* low_4gb */ false,
+                                           /*low_4gb=*/ false,
                                            &error_msg);
   CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
   page_map_ = page_map_mem_map_.Begin();
@@ -287,7 +286,7 @@
       break;
     default:
       LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
-      break;
+      UNREACHABLE();
     }
     if (kIsDebugBuild) {
       // Clear the first page since it is not madvised due to the magic number.
@@ -326,7 +325,7 @@
     LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx << ", pm_type="
                << static_cast<int>(pm_type) << ", ptr=" << std::hex
                << reinterpret_cast<intptr_t>(ptr);
-    return 0;
+    UNREACHABLE();
   }
   // Update the page map and count the number of pages.
   size_t num_pages = 1;
@@ -515,7 +514,7 @@
         return FreePages(self, ptr, false);
       case kPageMapLargeObjectPart:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
-        return 0;
+        UNREACHABLE();
       case kPageMapRunPart: {
         // Find the beginning of the run.
         do {
@@ -530,11 +529,11 @@
       case kPageMapReleased:
       case kPageMapEmpty:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
-        return 0;
+        UNREACHABLE();
       }
       default:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
-        return 0;
+        UNREACHABLE();
     }
   }
   DCHECK(run != nullptr);
@@ -1308,7 +1307,7 @@
     case kPageMapEmpty:
       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                  << std::hex << reinterpret_cast<intptr_t>(ptr);
-      break;
+      UNREACHABLE();
     case kPageMapLargeObject: {
       size_t num_pages = 1;
       size_t idx = pm_idx + 1;
@@ -1322,7 +1321,7 @@
     case kPageMapLargeObjectPart:
       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                  << std::hex << reinterpret_cast<intptr_t>(ptr);
-      break;
+      UNREACHABLE();
     case kPageMapRun:
     case kPageMapRunPart: {
       // Find the beginning of the run.
@@ -1341,10 +1340,9 @@
     }
     default: {
       LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
-      break;
+      UNREACHABLE();
     }
   }
-  return 0;
 }
 
 bool RosAlloc::Trim() {
@@ -1457,7 +1455,7 @@
       }
       case kPageMapLargeObjectPart:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
-        break;
+        UNREACHABLE();
       case kPageMapRun: {
         // The start of a run.
         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
@@ -1477,10 +1475,10 @@
       }
       case kPageMapRunPart:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
-        break;
+        UNREACHABLE();
       default:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
-        break;
+        UNREACHABLE();
     }
   }
 }
@@ -1810,7 +1808,7 @@
         }
         case kPageMapLargeObjectPart:
           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
-          break;
+          UNREACHABLE();
         case kPageMapRun: {
           // The start of a run.
           Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
@@ -1838,7 +1836,7 @@
           // Fall-through.
         default:
           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
-          break;
+          UNREACHABLE();
       }
     }
   }
@@ -2031,7 +2029,7 @@
         break;  // Skip.
       default:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
-        break;
+        UNREACHABLE();
     }
   }
   return reclaimed_bytes;
@@ -2139,7 +2137,7 @@
       case kPageMapLargeObjectPart:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                    << DumpPageMap();
-        break;
+        UNREACHABLE();
       case kPageMapRun: {
         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
         size_t idx = run->size_bracket_idx_;
@@ -2158,7 +2156,7 @@
       default:
         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                    << DumpPageMap();
-        break;
+        UNREACHABLE();
     }
   }
   os << "RosAlloc stats:\n";
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0562167..0906295 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -30,9 +30,9 @@
 
 #include "base/allocator.h"
 #include "base/bit_utils.h"
-#include "base/globals.h"
 #include "base/mem_map.h"
 #include "base/mutex.h"
+#include "runtime_globals.h"
 #include "thread.h"
 
 namespace art {
@@ -830,16 +830,16 @@
            size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
   ~RosAlloc();
 
-  static size_t RunFreeListOffset() {
+  static constexpr size_t RunFreeListOffset() {
     return OFFSETOF_MEMBER(Run, free_list_);
   }
-  static size_t RunFreeListHeadOffset() {
+  static constexpr size_t RunFreeListHeadOffset() {
     return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
   }
-  static size_t RunFreeListSizeOffset() {
+  static constexpr size_t RunFreeListSizeOffset() {
     return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
   }
-  static size_t RunSlotNextOffset() {
+  static constexpr size_t RunSlotNextOffset() {
     return OFFSETOF_MEMBER(Slot, next_);
   }
 
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 2f1f577..992c32a 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -23,15 +23,19 @@
 namespace gc {
 
 // Different types of allocators.
+// Those marked with * have fast path entrypoints callable from generated code.
 enum AllocatorType {
-  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
-  kAllocatorTypeTLAB,  // Use TLAB allocator, has entrypoints.
-  kAllocatorTypeRosAlloc,  // Use RosAlloc allocator, has entrypoints.
-  kAllocatorTypeDlMalloc,  // Use dlmalloc allocator, has entrypoints.
-  kAllocatorTypeNonMoving,  // Special allocator for non moving objects, doesn't have entrypoints.
-  kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
-  kAllocatorTypeRegion,
-  kAllocatorTypeRegionTLAB,
+  // BumpPointer spaces are currently only used for ZygoteSpace construction.
+  kAllocatorTypeBumpPointer,  // Use global CAS-based BumpPointer allocator. (*)
+  kAllocatorTypeTLAB,  // Use TLAB allocator within BumpPointer space. (*)
+  kAllocatorTypeRosAlloc,  // Use RosAlloc (segregated size, free list) allocator. (*)
+  kAllocatorTypeDlMalloc,  // Use dlmalloc (well-known C malloc) allocator. (*)
+  kAllocatorTypeNonMoving,  // Special allocator for non moving objects.
+  kAllocatorTypeLOS,  // Large object space.
+  // The following differ from the BumpPointer allocators primarily in that memory is
+  // allocated from multiple regions, instead of a single contiguous space.
+  kAllocatorTypeRegion,  // Use CAS-based contiguous bump-pointer allocation within a region. (*)
+  kAllocatorTypeRegionTLAB,  // Use region pieces as TLABs. Default for most small objects. (*)
 };
 std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
 
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3095f9f..2de7910 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,7 +22,7 @@
 #include "gc/accounting/atomic_stack.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
-#include "gc/space/region_space.h"
+#include "gc/space/region_space-inl.h"
 #include "gc/verification.h"
 #include "lock_word.h"
 #include "mirror/class.h"
@@ -36,15 +36,15 @@
     Thread* const self,
     mirror::Object* ref,
     accounting::ContinuousSpaceBitmap* bitmap) {
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && young_gen_
-      && !done_scanning_.load(std::memory_order_acquire)) {
-    // Everything in the unevac space should be marked for generational CC except for large objects.
-    DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+  if (use_generational_cc_ && !done_scanning_.load(std::memory_order_acquire)) {
+    // Everything in the unevac space should be marked for young generation CC,
+    // except for large objects.
+    DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
+        << ref << " "
         << ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
-    // Since the mark bitmap is still filled in from last GC, we can not use that or else the
-    // mutator may see references to the from space. Instead, use the baker pointer itself as
-    // the mark bit.
+    // Since the mark bitmap is still filled in from the last GC (or from the marking phase of 2-phase CC),
+    // we can not use that or else the mutator may see references to the from space. Instead, use
+    // the baker pointer itself as the mark bit.
     if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
       // TODO: We don't actually need to scan this object later, we just need to clear the gray
       // bit.
@@ -76,8 +76,8 @@
     // we can avoid an expensive CAS.
     // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
     // set.
-    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
-                                             /* rb_state */ ReadBarrier::GrayState());
+    success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+                                             /* rb_state= */ ReadBarrier::GrayState());
   } else {
     success = !bitmap->AtomicTestAndSet(ref);
   }
@@ -113,8 +113,8 @@
     }
     // This may or may not succeed, which is ok because the object may already be gray.
     bool success =
-        ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
-                                       /* rb_state */ ReadBarrier::GrayState());
+        ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+                                       /* rb_state= */ ReadBarrier::GrayState());
     if (success) {
       MutexLock mu(self, immune_gray_stack_lock_);
       immune_gray_stack_.push_back(ref);
@@ -129,7 +129,7 @@
                                                mirror::Object* holder,
                                                MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (from_ref == nullptr) {
     return nullptr;
   }
@@ -171,9 +171,7 @@
         return to_ref;
       }
       case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
-        if (kEnableGenerationalConcurrentCopyingCollection
-            && kNoUnEvac
-            && !region_space_->IsLargeObject(from_ref)) {
+        if (kNoUnEvac && use_generational_cc_ && !region_space_->IsLargeObject(from_ref)) {
           if (!kFromGCThread) {
             DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator";
           }
@@ -186,7 +184,7 @@
         region_space_->Unprotect();
         LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
         UNREACHABLE();
     }
   } else {
@@ -209,8 +207,8 @@
   if (UNLIKELY(mark_from_read_barrier_measurements_)) {
     ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
   } else {
-    ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
-                                                                                      from_ref);
+    ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+                                                                                         from_ref);
   }
   // Only set the mark bit for baker barrier.
   if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
@@ -242,15 +240,32 @@
   // Use load-acquire on the read barrier pointer to ensure that we never see a black (non-gray)
   // read barrier state with an unmarked bit due to reordering.
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && young_gen_
-      && !done_scanning_.load(std::memory_order_acquire)) {
-    return from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState();
-  }
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
+    // If the card table scanning is not finished yet, then only read-barrier
+    // state should be checked. Checking the mark bitmap is unreliable as there
+    // may be some objects - whose corresponding card is dirty - which are
+    // marked in the mark bitmap, but cannot be considered marked unless their
+    // read-barrier state is set to Gray.
+    //
+    // Why read the read-barrier state before checking done_scanning_?
+    // If the read-barrier state were read *after* done_scanning_, there would
+    // be a race in which this function could return false even though the
+    // object has already been marked, because the state would be observed too
+    // late. The following scenario may cause the race:
+    //
+    // 1. Mutator thread reads done_scanning_ and upon finding it false, gets
+    // suspended before reading the object's read-barrier state.
+    // 2. GC thread finishes card-table scan and then sets done_scanning_ to
+    // true.
+    // 3. GC thread grays the object, scans it, marks in the bitmap, and then
+    // changes its read-barrier state back to non-gray.
+    // 4. Mutator thread resumes, reads the object's read-barrier state and
+    // returns false.
+    return region_space_bitmap_->Test(from_ref);
   }
-  return region_space_bitmap_->Test(from_ref);
+  return false;
 }
 
 }  // namespace collector
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index eede5a5..642b12e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -17,6 +17,7 @@
 #include "concurrent_copying.h"
 
 #include "art_field-inl.h"
+#include "barrier.h"
 #include "base/enums.h"
 #include "base/file_utils.h"
 #include "base/histogram-inl.h"
@@ -40,6 +41,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object_reference.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
 #include "thread_list.h"
@@ -67,15 +69,19 @@
 
 ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                      bool young_gen,
+                                     bool use_generational_cc,
                                      const std::string& name_prefix,
                                      bool measure_read_barrier_slow_path)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") +
                        "concurrent copying"),
-      region_space_(nullptr), gc_barrier_(new Barrier(0)),
+      region_space_(nullptr),
+      gc_barrier_(new Barrier(0)),
       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                      kDefaultGcMarkStackSize,
                                                      kDefaultGcMarkStackSize)),
+      use_generational_cc_(use_generational_cc),
+      young_gen_(young_gen),
       rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                          kReadBarrierMarkStackSize,
                                                          kReadBarrierMarkStackSize)),
@@ -93,7 +99,11 @@
       from_space_num_bytes_at_first_pause_(0),
       mark_stack_mode_(kMarkStackModeOff),
       weak_ref_access_enabled_(true),
-      young_gen_(young_gen),
+      copied_live_bytes_ratio_sum_(0.f),
+      gc_count_(0),
+      region_space_inter_region_bitmap_(nullptr),
+      non_moving_space_inter_region_bitmap_(nullptr),
+      reclaimed_bytes_ratio_sum_(0.f),
       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
       measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
       mark_from_read_barrier_measurements_(false),
@@ -108,10 +118,11 @@
       force_evacuate_all_(false),
       gc_grays_immune_objects_(false),
       immune_gray_stack_lock_("concurrent copying immune gray stack lock",
-                              kMarkSweepMarkStackLock) {
+                              kMarkSweepMarkStackLock),
+      num_bytes_allocated_before_gc_(0) {
   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                 "The region space size and the read barrier table region size must match");
-  CHECK(kEnableGenerationalConcurrentCopyingCollection || !young_gen_);
+  CHECK(use_generational_cc_ || !young_gen_);
   Thread* self = Thread::Current();
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -130,15 +141,14 @@
       pooled_mark_stacks_.push_back(mark_stack);
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     // Allocate sweep array free buffer.
     std::string error_msg;
     sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
         "concurrent copying sweep array free buffer",
-        /* addr */ nullptr,
         RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
         PROT_READ | PROT_WRITE,
-        /* low_4gb */ false,
+        /*low_4gb=*/ false,
         &error_msg);
     CHECK(sweep_array_free_buffer_mem_map_.IsValid())
         << "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -185,6 +195,11 @@
   {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
     InitializePhase();
+    // In case of forced evacuation, all regions are evacuated and hence no
+    // need to compute live_bytes.
+    if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) {
+      MarkingPhase();
+    }
   }
   if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
     // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
@@ -198,7 +213,7 @@
   FlipThreadRoots();
   {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
-    MarkingPhase();
+    CopyingPhase();
   }
   // Verify no from space refs. This causes a pause.
   if (kEnableNoFromSpaceRefsVerification) {
@@ -277,6 +292,29 @@
   gc_barrier_->Increment(self, barrier_count);
 }
 
+void ConcurrentCopying::CreateInterRegionRefBitmaps() {
+  DCHECK(use_generational_cc_);
+  DCHECK(region_space_inter_region_bitmap_ == nullptr);
+  DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
+  DCHECK(region_space_ != nullptr);
+  DCHECK(heap_->non_moving_space_ != nullptr);
+  // Region-space
+  region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "region-space inter region ref bitmap",
+      reinterpret_cast<uint8_t*>(region_space_->Begin()),
+      region_space_->Limit() - region_space_->Begin()));
+  CHECK(region_space_inter_region_bitmap_ != nullptr)
+      << "Couldn't allocate region-space inter region ref bitmap";
+
+  // non-moving-space
+  non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "non-moving-space inter region ref bitmap",
+      reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
+      heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin()));
+  CHECK(non_moving_space_inter_region_bitmap_ != nullptr)
+      << "Couldn't allocate non-moving-space inter region ref bitmap";
+}
+
 void ConcurrentCopying::BindBitmaps() {
   Thread* self = Thread::Current();
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -289,19 +327,30 @@
     } else {
       CHECK(!space->IsZygoteSpace());
       CHECK(!space->IsImageSpace());
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      CHECK(space == region_space_ || space == heap_->non_moving_space_);
+      if (use_generational_cc_) {
         if (space == region_space_) {
           region_space_bitmap_ = region_space_->GetMarkBitmap();
         } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
           DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
           space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
         }
-        // Age all of the cards for the region space so that we know which evac regions to scan.
-        Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
-            space->Begin(),
-            space->End(),
-            AgeCardVisitor(),
-            VoidFunctor());
+        if (young_gen_) {
+          // Age all of the cards for the region space so that we know which evac regions to scan.
+          heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
+                                                   space->End(),
+                                                   AgeCardVisitor(),
+                                                   VoidFunctor());
+        } else {
+          // In a full-heap GC cycle, the card-table corresponding to region-space and
+          // non-moving space can be cleared, because this cycle only needs to
+          // capture writes during the marking phase of this cycle to catch
+          // objects that skipped marking due to heap mutation. Furthermore,
+          // if the next GC is a young-gen cycle, then it only needs writes to
+          // be captured after the thread-flip of this GC cycle, as that is when
+          // the young-gen for the next GC cycle starts getting populated.
+          heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+        }
       } else {
         if (space == region_space_) {
           // It is OK to clear the bitmap with mutators running since the only place it is read is
@@ -312,7 +361,7 @@
       }
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
       CHECK(space->IsLargeObjectSpace());
       space->AsLargeObjectSpace()->CopyLiveToMarked();
@@ -322,17 +371,13 @@
 
 void ConcurrentCopying::InitializePhase() {
   TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
+  num_bytes_allocated_before_gc_ = static_cast<int64_t>(heap_->GetBytesAllocated());
   if (kVerboseMode) {
     LOG(INFO) << "GC InitializePhase";
     LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
               << reinterpret_cast<void*>(region_space_->Limit());
   }
   CheckEmptyMarkStack();
-  if (kIsDebugBuild) {
-    MutexLock mu(Thread::Current(), mark_stack_lock_);
-    CHECK(false_gray_stack_.empty());
-  }
-
   rb_mark_bit_stack_full_ = false;
   mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
   if (measure_read_barrier_slow_path_) {
@@ -349,7 +394,7 @@
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
 
   force_evacuate_all_ = false;
-  if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+  if (!use_generational_cc_ || !young_gen_) {
     if (gc_cause == kGcCauseExplicit ||
         gc_cause == kGcCauseCollectorTransition ||
         GetCurrentIteration()->GetClearSoftReferences()) {
@@ -365,7 +410,7 @@
       DCHECK(immune_gray_stack_.empty());
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     done_scanning_.store(false, std::memory_order_release);
   }
   BindBitmaps();
@@ -379,9 +424,10 @@
     }
     LOG(INFO) << "GC end of InitializePhase";
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+  if (use_generational_cc_ && !young_gen_) {
     region_space_bitmap_->Clear();
   }
+  mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
   // Mark all of the zygote large objects without graying them.
   MarkZygoteLargeObjects();
 }
@@ -406,7 +452,7 @@
         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
         reinterpret_cast<Atomic<size_t>*>(
             &concurrent_copying_->from_space_num_objects_at_first_pause_)->
-                fetch_add(thread_local_objects, std::memory_order_seq_cst);
+                fetch_add(thread_local_objects, std::memory_order_relaxed);
       } else {
         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
       }
@@ -423,7 +469,7 @@
 
   void VisitRoots(mirror::Object*** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     for (size_t i = 0; i < count; ++i) {
@@ -440,7 +486,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     for (size_t i = 0; i < count; ++i) {
@@ -472,7 +518,7 @@
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
-    if (kVerifyNoMissingCardMarks) {
+    if (kVerifyNoMissingCardMarks && cc->young_gen_) {
       cc->VerifyNoMissingCardMarks();
     }
     CHECK_EQ(thread, self);
@@ -486,9 +532,11 @@
     }
     {
       TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
-      // Only change live bytes for full CC.
+      // Only change live bytes for 1-phase full heap CC.
       cc->region_space_->SetFromSpace(
-          cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
+          cc->rb_table_,
+          evac_mode,
+          /*clear_live_bytes=*/ !cc->use_generational_cc_);
     }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -497,9 +545,7 @@
       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
     }
     cc->is_marking_ = true;
-    cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
-                               std::memory_order_relaxed);
-    if (kIsDebugBuild && !cc->young_gen_) {
+    if (kIsDebugBuild && !cc->use_generational_cc_) {
       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
     }
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -573,8 +619,10 @@
     if (ref != nullptr) {
       if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
         // Not immune, must be a zygote large object.
-        CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
-            Thread::Current(), ref.Ptr()))
+        space::LargeObjectSpace* large_object_space =
+            Runtime::Current()->GetHeap()->GetLargeObjectsSpace();
+        CHECK(large_object_space->Contains(ref.Ptr()) &&
+              large_object_space->IsZygoteLargeObject(Thread::Current(), ref.Ptr()))
             << "Non gray object references non immune, non zygote large object "<< ref << " "
             << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
             << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
@@ -599,7 +647,7 @@
         REQUIRES_SHARED(Locks::mutator_lock_) {
       // If an object is not gray, it should only have references to things in the immune spaces.
       if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
-        obj->VisitReferences</*kVisitNativeRoots*/true,
+        obj->VisitReferences</*kVisitNativeRoots=*/true,
                              kDefaultVerifyFlags,
                              kWithoutReadBarrier>(visitor, visitor);
       }
@@ -667,8 +715,8 @@
     // Objects on clean cards should never have references to newly allocated regions. Note
     // that aged cards are also not clean.
     if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
-      VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
-      obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+      VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
+      obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
           internal_visitor, internal_visitor);
     }
   };
@@ -683,7 +731,7 @@
 // Switch threads that from from-space to to-space refs. Forward/mark the thread roots.
 void ConcurrentCopying::FlipThreadRoots() {
   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
-  if (kVerboseMode) {
+  if (kVerboseMode || heap_->dump_region_info_before_gc_) {
     LOG(INFO) << "time=" << region_space_->Time();
     region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
   }
@@ -740,7 +788,7 @@
   TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
   accounting::CardTable* const card_table = heap_->GetCardTable();
   Thread* const self = Thread::Current();
-  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
   VisitorType visitor(self);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -767,11 +815,11 @@
                 : card;
           },
           /* card modified visitor */ VoidFunctor());
-      card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
-                                               space->Begin(),
-                                               space->End(),
-                                               visitor,
-                                               gc::accounting::CardTable::kCardAged);
+      card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+                                              space->Begin(),
+                                              space->End(),
+                                              visitor,
+                                              gc::accounting::CardTable::kCardAged);
     }
   }
 }
@@ -779,7 +827,7 @@
 void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
   TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
   accounting::CardTable* const card_table = heap_->GetCardTable();
-  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
   Thread* const self = Thread::Current();
   VisitorType visitor(self);
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -789,11 +837,11 @@
 
     // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
     // also handles the mod-union table cards.
-    card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
-                                             space->Begin(),
-                                             space->End(),
-                                             visitor,
-                                             gc::accounting::CardTable::kCardDirty);
+    card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+                                            space->Begin(),
+                                            space->End(),
+                                            visitor,
+                                            gc::accounting::CardTable::kCardDirty);
     if (table != nullptr) {
       // Add the cards to the mod-union table so that we can clear cards to save RAM.
       table->ProcessCards();
@@ -821,7 +869,7 @@
   DCHECK(obj != nullptr);
   DCHECK(immune_spaces_.ContainsObject(obj));
   // Update the fields without graying it or pushing it onto the mark stack.
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Young GC does not care about references to unevac space. It is safe to not gray these as
     // long as scan immune objects happens after scanning the dirty cards.
     Scan<true>(obj);
@@ -859,13 +907,484 @@
   ConcurrentCopying* const collector_;
 };
 
-// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+template <bool kAtomicTestAndSet = false>
+class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
+ public:
+  explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
+      : collector_(cc), self_(self) {}
+
+  void VisitRoots(mirror::Object*** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      mirror::Object** root = roots[i];
+      mirror::Object* ref = *root;
+      if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+        collector_->PushOntoMarkStack(self_, ref);
+      }
+    }
+  }
+
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      mirror::CompressedReference<mirror::Object>* const root = roots[i];
+      if (!root->IsNull()) {
+        mirror::Object* ref = root->AsMirrorPtr();
+        if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+          collector_->PushOntoMarkStack(self_, ref);
+        }
+      }
+    }
+  }
+
+ private:
+  ConcurrentCopying* const collector_;
+  Thread* const self_;
+};
+
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
+ public:
+  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
+                                       bool disable_weak_ref_access)
+      : concurrent_copying_(concurrent_copying),
+        disable_weak_ref_access_(disable_weak_ref_access) {
+  }
+
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+    // Note: self is not necessarily equal to thread since thread may be suspended.
+    Thread* const self = Thread::Current();
+    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+        << thread->GetState() << " thread " << thread << " self " << self;
+    // Revoke thread local mark stacks.
+    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+    if (tl_mark_stack != nullptr) {
+      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
+      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+      thread->SetThreadLocalMarkStack(nullptr);
+    }
+    // Disable weak ref access.
+    if (disable_weak_ref_access_) {
+      thread->SetWeakRefAccessEnabled(false);
+    }
+    // If thread is a running mutator, then act on behalf of the garbage collector.
+    // See the code in ThreadList::RunCheckpoint.
+    concurrent_copying_->GetBarrier().Pass(self);
+  }
+
+ protected:
+  ConcurrentCopying* const concurrent_copying_;
+
+ private:
+  const bool disable_weak_ref_access_;
+};
+
+class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
+  public RevokeThreadLocalMarkStackCheckpoint {
+ public:
+  explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
+    RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access= */ false) {}
+
+  void Run(Thread* thread) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    Thread* const self = Thread::Current();
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
+    // only.
+    CaptureRootsForMarkingVisitor</*kAtomicTestAndSet=*/ true> visitor(concurrent_copying_, self);
+    thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+    // Barrier handling is done in the base class' Run() below.
+    RevokeThreadLocalMarkStackCheckpoint::Run(thread);
+  }
+};
+
+void ConcurrentCopying::CaptureThreadRootsForMarking() {
+  TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
+  if (kVerboseMode) {
+    LOG(INFO) << "time=" << region_space_->Time();
+    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+  }
+  Thread* const self = Thread::Current();
+  CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+  gc_barrier_->Init(self, 0);
+  size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback= */ nullptr);
+  // If there are no threads to wait for, which implies that all the checkpoint functions have
+  // already finished, then there is no need to release the mutator lock.
+  if (barrier_count == 0) {
+    return;
+  }
+  Locks::mutator_lock_->SharedUnlock(self);
+  {
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    gc_barrier_->Increment(self, barrier_count);
+  }
+  Locks::mutator_lock_->SharedLock(self);
+  if (kVerboseMode) {
+    LOG(INFO) << "time=" << region_space_->Time();
+    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+    LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
+  }
+}
+
+// Used to scan ref fields of an object.
+template <bool kHandleInterRegionRefs>
+class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
+ public:
+  explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
+                                                   size_t obj_region_idx)
+      : collector_(collector),
+      obj_region_idx_(obj_region_idx),
+      contains_inter_region_idx_(false) {}
+
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+    DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
+    DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
+    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
+  }
+
+  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    // If the referent is not null, then we must re-visit the object during
+    // copying phase to enqueue it for delayed processing and setting
+    // read-barrier state to gray to ensure that call to GetReferent() triggers
+    // the read-barrier. We use same data structure that is used to remember
+    // objects with inter-region refs for this purpose too.
+    if (kHandleInterRegionRefs
+        && !contains_inter_region_idx_
+        && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
+      contains_inter_region_idx_ = true;
+    }
+  }
+
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CheckReference(root->AsMirrorPtr());
+  }
+
+  bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+    return contains_inter_region_idx_;
+  }
+
+ private:
+  void CheckReference(mirror::Object* ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (ref == nullptr) {
+      // Nothing to do.
+      return;
+    }
+    if (!collector_->TestAndSetMarkBitForRef(ref)) {
+      collector_->PushOntoLocalMarkStack(ref);
+    }
+    if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
+      size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
+      // If a region-space object refers to an outside object, we will have a
+      // mismatch of region idx, but the object need not be re-visited in
+      // copying phase.
+      if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
+        contains_inter_region_idx_ = true;
+      }
+    }
+  }
+
+  ConcurrentCopying* const collector_;
+  const size_t obj_region_idx_;
+  mutable bool contains_inter_region_idx_;
+};
+
+void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
+  DCHECK(ref != nullptr);
+  DCHECK(!immune_spaces_.ContainsObject(ref));
+  DCHECK(TestMarkBitmapForRef(ref));
+  size_t obj_region_idx = static_cast<size_t>(-1);
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
+    // Add live bytes to the corresponding region.
+    if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
+      // Newly allocated regions are always chosen for evacuation. So no need
+      // to update live_bytes_.
+      size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
+      size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+      region_space_->AddLiveBytes(ref, alloc_size);
+    }
+  }
+  ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs=*/ true>
+      visitor(this, obj_region_idx);
+  ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+      visitor, visitor);
+  // Mark the corresponding card dirty if the object contains any
+  // inter-region reference.
+  if (visitor.ContainsInterRegionRefs()) {
+    if (obj_region_idx == static_cast<size_t>(-1)) {
+      // If an inter-region ref has been found in a non-region-space, then it
+      // must be non-moving-space. This is because this function cannot be
+      // called on an immune-space object, and a large-object-space object has
+      // only class object reference, which is either in some immune-space, or
+      // in non-moving-space.
+      DCHECK(heap_->non_moving_space_->HasAddress(ref));
+      non_moving_space_inter_region_bitmap_->Set(ref);
+    } else {
+      region_space_inter_region_bitmap_->Set(ref);
+    }
+  }
+}
+
+template <bool kAtomic>
+bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
+  accounting::ContinuousSpaceBitmap* bitmap = nullptr;
+  accounting::LargeObjectBitmap* los_bitmap = nullptr;
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    bitmap = region_space_bitmap_;
+  } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+    bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+  } else if (immune_spaces_.ContainsObject(ref)) {
+    // References to immune space objects are always live.
+    DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+    return true;
+  } else {
+    // Should be a large object. Must be page aligned and the LOS must exist.
+    if (kIsDebugBuild
+        && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+      // It must be heap corruption. Remove memory protection and dump data.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(/* obj= */ nullptr,
+                                                  MemberOffset(0),
+                                                  ref,
+                                                  /* fatal= */ true);
+    }
+    los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+  }
+  if (kAtomic) {
+    return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
+  } else {
+    return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
+  }
+}
+
+bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    return region_space_bitmap_->Test(ref);
+  } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+    return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
+  } else if (immune_spaces_.ContainsObject(ref)) {
+    // References to immune space objects are always live.
+    DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+    return true;
+  } else {
+    // Should be a large object. Must be page aligned and the LOS must exist.
+    if (kIsDebugBuild
+        && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+      // It must be heap corruption. Remove memory protection and dump data.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(/* obj= */ nullptr,
+                                                  MemberOffset(0),
+                                                  ref,
+                                                  /* fatal= */ true);
+    }
+    return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
+  }
+}
+
+void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
+  if (kIsDebugBuild) {
+    Thread *self = Thread::Current();
+    DCHECK_EQ(thread_running_gc_, self);
+    DCHECK(self->GetThreadLocalMarkStack() == nullptr);
+  }
+  DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
+  gc_mark_stack_->PushBack(ref);
+}
+
+void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
+  // Process thread-local mark stack containing thread roots
+  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+                               /* checkpoint_callback= */ nullptr,
+                               [this] (mirror::Object* ref)
+                                   REQUIRES_SHARED(Locks::mutator_lock_) {
+                                 AddLiveBytesAndScanRef(ref);
+                               });
+
+  while (!gc_mark_stack_->IsEmpty()) {
+    mirror::Object* ref = gc_mark_stack_->PopBack();
+    AddLiveBytesAndScanRef(ref);
+  }
+}
+
+class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
+ public:
+  explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
+
+  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs=*/ false>
+        visitor(collector_, /*obj_region_idx=*/ static_cast<size_t>(-1));
+    obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+        visitor, visitor);
+  }
+
+  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
+  }
+
+ private:
+  ConcurrentCopying* const collector_;
+};
+
+/* Invariants for two-phase CC
+ * ===========================
+ * A) Definitions
+ * ---------------
+ * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
+ * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
+ * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
+ * 4) Gray: marked in bitmap, and exists in mark stack
+ * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
+ *    dirty, and exists in mark stack
+ * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
+ *
+ * B) Before marking phase
+ * -----------------------
+ * 1) All objects are white
+ * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
+ * 3) Mark bitmap is cleared
+ * 4) Mark stack is empty
+ *
+ * C) During marking phase
+ * ------------------------
+ * 1) If a black object holds an inter-region or white reference, then its
+ *    corresponding card is dirty. In other words, it changes from being
+ *    black-clean to black-dirty
+ * 2) No black-clean object points to a white object
+ *
+ * D) After marking phase
+ * -----------------------
+ * 1) There are no gray objects
+ * 2) All newly allocated objects are in from space
+ * 3) No white object can be reachable, directly or otherwise, from a
+ *    black-clean object
+ *
+ * E) During copying phase
+ * ------------------------
+ * 1) Mutators cannot observe white and black-dirty objects
+ * 2) New allocations are in to-space (newly allocated regions are part of to-space)
+ * 3) An object in mark stack must have its rb_state = Gray
+ *
+ * F) During card table scan
+ * --------------------------
+ * 1) Referents corresponding to root references are gray or in to-space
+ * 2) Every path from an object that is read or written by a mutator during
+ *    this period to a dirty black object goes through some gray object.
+ *    Mutators preserve this by graying black objects as needed during this
+ *    period. Ensures that a mutator never encounters a black dirty object.
+ *
+ * G) After card table scan
+ * ------------------------
+ * 1) There are no black-dirty objects
+ * 2) Referents corresponding to root references are gray, black-clean or in
+ *    to-space
+ *
+ * H) After copying phase
+ * -----------------------
+ * 1) Mark stack is empty
+ * 2) No references into evacuated from-space
+ * 3) No reference to an object which is unmarked and is also not in newly
+ *    allocated region. In other words, no reference to white objects.
+*/
+
 void ConcurrentCopying::MarkingPhase() {
   TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
   if (kVerboseMode) {
     LOG(INFO) << "GC MarkingPhase";
   }
+  accounting::CardTable* const card_table = heap_->GetCardTable();
+  Thread* const self = Thread::Current();
+  // Clear live_bytes_ of every non-free region, except the ones that are newly
+  // allocated.
+  region_space_->SetAllRegionLiveBytesZero();
+  if (kIsDebugBuild) {
+    region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+  }
+  // Scan immune spaces
+  {
+    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+    for (auto& space : immune_spaces_.GetSpaces()) {
+      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      ImmuneSpaceCaptureRefsVisitor visitor(this);
+      if (table != nullptr) {
+        table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
+      } else {
+        WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+        card_table->Scan<false>(
+            live_bitmap,
+            space->Begin(),
+            space->Limit(),
+            visitor,
+            accounting::CardTable::kCardDirty - 1);
+      }
+    }
+  }
+  // Scan runtime roots
+  {
+    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+    CaptureRootsForMarkingVisitor visitor(this, self);
+    Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
+  }
+  {
+    // TODO: don't visit the transaction roots if it's not active.
+    TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
+    CaptureRootsForMarkingVisitor visitor(this, self);
+    Runtime::Current()->VisitNonThreadRoots(&visitor);
+  }
+  // Capture thread roots
+  CaptureThreadRootsForMarking();
+  // Process mark stack
+  ProcessMarkStackForMarkingAndComputeLiveBytes();
+
+  if (kVerboseMode) {
+    LOG(INFO) << "GC end of MarkingPhase";
+  }
+}
+
+template <bool kNoUnEvac>
+void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
+  Scan<kNoUnEvac>(obj);
+  // Set the read-barrier state of a reference-type object to gray if its
+  // referent is not marked yet. This is to ensure that if GetReferent() is
+  // called, it triggers the read-barrier to process the referent before use.
+  if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
+    mirror::Object* referent =
+        obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
+    if (referent != nullptr && !IsInToSpace(referent)) {
+      obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
+    }
+  }
+}
+
+// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+void ConcurrentCopying::CopyingPhase() {
+  TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
+  if (kVerboseMode) {
+    LOG(INFO) << "GC CopyingPhase";
+  }
   Thread* self = Thread::Current();
+  accounting::CardTable* const card_table = heap_->GetCardTable();
   if (kIsDebugBuild) {
     MutexLock mu(self, *Locks::thread_list_lock_);
     CHECK(weak_ref_access_enabled_);
@@ -878,7 +1397,7 @@
   if (kUseBakerReadBarrier) {
     gc_grays_immune_objects_ = false;
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_) {
     if (kVerboseMode) {
       LOG(INFO) << "GC ScanCardsForSpace";
     }
@@ -896,39 +1415,76 @@
         continue;
       }
       // Scan all of the objects on dirty cards in unevac from space, and non moving space. These
-      // are from previous GCs and may reference things in the from space.
+      // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
+      // in the from space.
       //
       // Note that we do not need to process the large-object space (the only discontinuous space)
       // as it contains only large string objects and large primitive array objects, that have no
       // reference to other objects, except their class. There is no need to scan these large
       // objects, as the String class and the primitive array classes are expected to never move
-      // during a minor (young-generation) collection:
+      // during a collection:
       // - In the case where we run with a boot image, these classes are part of the image space,
       //   which is an immune space.
       // - In the case where we run without a boot image, these classes are allocated in the
       //   non-moving space (see art::ClassLinker::InitWithoutImage).
-      Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+      card_table->Scan<false>(
           space->GetMarkBitmap(),
           space->Begin(),
           space->End(),
           [this, space](mirror::Object* obj)
               REQUIRES(Locks::heap_bitmap_lock_)
               REQUIRES_SHARED(Locks::mutator_lock_) {
-            // Don't push or gray unevac refs.
-            if (kIsDebugBuild && space == region_space_) {
-              // We may get unevac large objects.
-              if (!region_space_->IsInUnevacFromSpace(obj)) {
-                CHECK(region_space_bitmap_->Test(obj));
-                region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
-                LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+            // TODO: This code may be refactored to avoid scanning object while
+            // done_scanning_ is false by setting rb_state to gray, and pushing the
+            // object on mark stack. However, it will also require clearing the
+            // corresponding mark-bit and, for region space objects,
+            // decrementing the object's size from the corresponding region's
+            // live_bytes.
+            if (young_gen_) {
+              // Don't push or gray unevac refs.
+              if (kIsDebugBuild && space == region_space_) {
+                // We may get unevac large objects.
+                if (!region_space_->IsInUnevacFromSpace(obj)) {
+                  CHECK(region_space_bitmap_->Test(obj));
+                  region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+                  LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+                }
               }
+              ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+            } else if (space != region_space_) {
+              DCHECK(space == heap_->non_moving_space_);
+              // We need to process un-evac references as they may be unprocessed,
+              // if they skipped the marking phase due to heap mutation.
+              ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+              non_moving_space_inter_region_bitmap_->Clear(obj);
+            } else if (region_space_->IsInUnevacFromSpace(obj)) {
+              ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+              region_space_inter_region_bitmap_->Clear(obj);
             }
-            Scan<true>(obj);
           },
-          accounting::CardTable::kCardDirty - 1);
+          accounting::CardTable::kCardAged);
+
+      if (!young_gen_) {
+        auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+                         // We don't need to process un-evac references as any unprocessed
+                         // ones will be taken care of in the card-table scan above.
+                         ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+                       };
+        if (space == region_space_) {
+          region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor);
+        } else {
+          DCHECK(space == heap_->non_moving_space_);
+          non_moving_space_inter_region_bitmap_->VisitMarkedRange(
+              reinterpret_cast<uintptr_t>(space->Begin()),
+              reinterpret_cast<uintptr_t>(space->End()),
+              visitor);
+        }
+      }
     }
     // Done scanning unevac space.
     done_scanning_.store(true, std::memory_order_release);
+    // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed.
+    // Currently we do it in ReclaimPhase().
     if (kVerboseMode) {
       LOG(INFO) << "GC end of ScanCardsForSpace";
     }
@@ -946,10 +1502,13 @@
       if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
         table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
       } else {
-        // TODO: Scan only the aged cards.
-        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
-                                      reinterpret_cast<uintptr_t>(space->Limit()),
-                                      visitor);
+        WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+        card_table->Scan<false>(
+            live_bitmap,
+            space->Begin(),
+            space->Limit(),
+            visitor,
+            accounting::CardTable::kCardDirty - 1);
       }
     }
   }
@@ -1050,9 +1609,6 @@
     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
     // Marking is done. Disable marking.
     DisableMarking();
-    if (kUseBakerReadBarrier) {
-      ProcessFalseGrayStack();
-    }
     CheckEmptyMarkStack();
   }
 
@@ -1061,7 +1617,7 @@
     CHECK(weak_ref_access_enabled_);
   }
   if (kVerboseMode) {
-    LOG(INFO) << "GC end of MarkingPhase";
+    LOG(INFO) << "GC end of CopyingPhase";
   }
 }
 
@@ -1164,32 +1720,6 @@
   mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
 }
 
-void ConcurrentCopying::PushOntoFalseGrayStack(Thread* const self, mirror::Object* ref) {
-  CHECK(kUseBakerReadBarrier);
-  DCHECK(ref != nullptr);
-  MutexLock mu(self, mark_stack_lock_);
-  false_gray_stack_.push_back(ref);
-}
-
-void ConcurrentCopying::ProcessFalseGrayStack() {
-  CHECK(kUseBakerReadBarrier);
-  // Change the objects on the false gray stack from gray to non-gray (conceptually black).
-  MutexLock mu(Thread::Current(), mark_stack_lock_);
-  for (mirror::Object* obj : false_gray_stack_) {
-    DCHECK(IsMarked(obj));
-    // The object could be non-gray (conceptually black) here if a thread got preempted after a
-    // success at the AtomicSetReadBarrierState in MarkNonMoving(), GC started marking through it
-    // (but not finished so still gray), the thread ran to register it onto the false gray stack,
-    // and then GC eventually marked it black (non-gray) after it finished scanning it.
-    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
-      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                    ReadBarrier::NonGrayState());
-      DCHECK(success);
-    }
-  }
-  false_gray_stack_.clear();
-}
-
 void ConcurrentCopying::IssueEmptyCheckpoint() {
   Thread* self = Thread::Current();
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
@@ -1374,7 +1904,7 @@
     space::RegionSpace* region_space = RegionSpace();
     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
     VerifyNoFromSpaceRefsFieldVisitor visitor(this);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+    obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
         visitor,
         visitor);
     if (kUseBakerReadBarrier) {
@@ -1412,24 +1942,6 @@
 }
 
 // The following visitors are used to assert the to-space invariant.
-class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
- public:
-  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
-      : collector_(collector) {}
-
-  void operator()(mirror::Object* ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
-    if (ref == nullptr) {
-      // OK.
-      return;
-    }
-    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
-  }
-
- private:
-  ConcurrentCopying* const collector_;
-};
-
 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
  public:
   explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
@@ -1441,8 +1953,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(ref);
+    collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
   }
   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
@@ -1458,48 +1969,14 @@
 
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(root->AsMirrorPtr());
+    mirror::Object* ref = root->AsMirrorPtr();
+    collector_->AssertToSpaceInvariant(/* obj */ nullptr, MemberOffset(0), ref);
   }
 
  private:
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
- public:
-  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
-                                       bool disable_weak_ref_access)
-      : concurrent_copying_(concurrent_copying),
-        disable_weak_ref_access_(disable_weak_ref_access) {
-  }
-
-  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
-    // Note: self is not necessarily equal to thread since thread may be suspended.
-    Thread* self = Thread::Current();
-    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
-        << thread->GetState() << " thread " << thread << " self " << self;
-    // Revoke thread local mark stacks.
-    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
-    if (tl_mark_stack != nullptr) {
-      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
-      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
-      thread->SetThreadLocalMarkStack(nullptr);
-    }
-    // Disable weak ref access.
-    if (disable_weak_ref_access_) {
-      thread->SetWeakRefAccessEnabled(false);
-    }
-    // If thread is a running mutator, then act on behalf of the garbage collector.
-    // See the code in ThreadList::RunCheckpoint.
-    concurrent_copying_->GetBarrier().Pass(self);
-  }
-
- private:
-  ConcurrentCopying* const concurrent_copying_;
-  const bool disable_weak_ref_access_;
-};
-
 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
                                                     Closure* checkpoint_callback) {
   Thread* self = Thread::Current();
@@ -1556,8 +2033,12 @@
   MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Process the thread-local mark stacks and the GC mark stack.
-    count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
-                                          /* checkpoint_callback */ nullptr);
+    count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+                                          /* checkpoint_callback= */ nullptr,
+                                          [this] (mirror::Object* ref)
+                                              REQUIRES_SHARED(Locks::mutator_lock_) {
+                                            ProcessMarkStackRef(ref);
+                                          });
     while (!gc_mark_stack_->IsEmpty()) {
       mirror::Object* to_ref = gc_mark_stack_->PopBack();
       ProcessMarkStackRef(to_ref);
@@ -1613,8 +2094,10 @@
   return count == 0;
 }
 
+template <typename Processor>
 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
-                                                       Closure* checkpoint_callback) {
+                                                       Closure* checkpoint_callback,
+                                                       const Processor& processor) {
   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
   size_t count = 0;
@@ -1628,7 +2111,7 @@
   for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
     for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
       mirror::Object* to_ref = p->AsMirrorPtr();
-      ProcessMarkStackRef(to_ref);
+      processor(to_ref);
       ++count;
     }
     {
@@ -1648,41 +2131,93 @@
 
 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   DCHECK(!region_space_->IsInFromSpace(to_ref));
+  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
   if (kUseBakerReadBarrier) {
     DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
-        << " " << to_ref << " " << to_ref->GetReadBarrierState()
-        << " is_marked=" << IsMarked(to_ref);
+        << " to_ref=" << to_ref
+        << " rb_state=" << to_ref->GetReadBarrierState()
+        << " is_marked=" << IsMarked(to_ref)
+        << " type=" << to_ref->PrettyTypeOf()
+        << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+        << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+        << " region_type=" << rtype
+        // TODO: Temporary; remove this when this is no longer needed (b/116087961).
+        << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
   }
-  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
   bool add_to_live_bytes = false;
   // Invariant: There should be no object from a newly-allocated
   // region (either large or non-large) on the mark stack.
   DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
-  if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
-    // Mark the bitmap only in the GC thread here so that we don't need a CAS.
-    if (!kUseBakerReadBarrier ||
-        !region_space_bitmap_->Set(to_ref)) {
-      // It may be already marked if we accidentally pushed the same object twice due to the racy
-      // bitmap read in MarkUnevacFromSpaceRegion.
-      if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
-        CHECK(region_space_->IsLargeObject(to_ref));
-        region_space_->ZeroLiveBytesForLargeObject(to_ref);
-        Scan<true>(to_ref);
-      } else {
-        Scan<false>(to_ref);
+  bool perform_scan = false;
+  switch (rtype) {
+    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
+      // Mark the bitmap only in the GC thread here so that we don't need a CAS.
+      if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+        // It may be already marked if we accidentally pushed the same object twice due to the racy
+        // bitmap read in MarkUnevacFromSpaceRegion.
+        if (use_generational_cc_ && young_gen_) {
+          CHECK(region_space_->IsLargeObject(to_ref));
+          region_space_->ZeroLiveBytesForLargeObject(to_ref);
+        }
+        perform_scan = true;
+        // Only add to the live bytes if the object was not already marked and we are not the young
+        // GC.
+        // Why add live bytes even after 2-phase GC?
+        // We need to ensure that if there is a unevac region with any live
+        // objects, then its live_bytes must be non-zero. Otherwise,
+        // ClearFromSpace() will clear the region. Considering, that we may skip
+        // live objects during marking phase of 2-phase GC, we have to take care
+        // of such objects here.
+        add_to_live_bytes = true;
       }
-      // Only add to the live bytes if the object was not already marked and we are not the young
-      // GC.
-      add_to_live_bytes = true;
-    }
-  } else {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
-      if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
+      break;
+    case space::RegionSpace::RegionType::kRegionTypeToSpace:
+      if (use_generational_cc_) {
         // Copied to to-space, set the bit so that the next GC can scan objects.
         region_space_bitmap_->Set(to_ref);
       }
-    }
-    if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+      perform_scan = true;
+      break;
+    default:
+      DCHECK(!region_space_->HasAddress(to_ref)) << to_ref;
+      DCHECK(!immune_spaces_.ContainsObject(to_ref));
+      // Non-moving or large-object space.
+      if (kUseBakerReadBarrier) {
+        accounting::ContinuousSpaceBitmap* mark_bitmap =
+            heap_->GetNonMovingSpace()->GetMarkBitmap();
+        const bool is_los = !mark_bitmap->HasAddress(to_ref);
+        if (is_los) {
+          if (!IsAligned<kPageSize>(to_ref)) {
+            // Ref is a large object that is not aligned, it must be heap
+            // corruption. Remove memory protection and dump data before
+            // AtomicSetReadBarrierState since it will fault if the address is not
+            // valid.
+            region_space_->Unprotect();
+            heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+                                                        MemberOffset(0),
+                                                        to_ref,
+                                                        /* fatal */ true);
+          }
+          DCHECK(heap_->GetLargeObjectsSpace())
+              << "ref=" << to_ref
+              << " doesn't belong to non-moving space and large object space doesn't exist";
+          accounting::LargeObjectBitmap* los_bitmap =
+              heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+          DCHECK(los_bitmap->HasAddress(to_ref));
+          // Only the GC thread could be setting the LOS bit map hence doesn't
+          // need to be atomically done.
+          perform_scan = !los_bitmap->Set(to_ref);
+        } else {
+          // Only the GC thread could be setting the non-moving space bit map
+          // hence doesn't need to be atomically done.
+          perform_scan = !mark_bitmap->Set(to_ref);
+        }
+      } else {
+        perform_scan = true;
+      }
+  }
+  if (perform_scan) {
+    if (use_generational_cc_ && young_gen_) {
       Scan<true>(to_ref);
     } else {
       Scan<false>(to_ref);
@@ -1690,8 +2225,15 @@
   }
   if (kUseBakerReadBarrier) {
     DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
-        << " " << to_ref << " " << to_ref->GetReadBarrierState()
-        << " is_marked=" << IsMarked(to_ref);
+        << " to_ref=" << to_ref
+        << " rb_state=" << to_ref->GetReadBarrierState()
+        << " is_marked=" << IsMarked(to_ref)
+        << " type=" << to_ref->PrettyTypeOf()
+        << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+        << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+        << " region_type=" << rtype
+        // TODO: Temporary; remove this when this is no longer needed (b/116087961).
+        << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
   }
 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
   mirror::Object* referent = nullptr;
@@ -1732,7 +2274,7 @@
     CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
     AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
     AssertToSpaceInvariantFieldVisitor visitor(this);
-    to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+    to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
         visitor,
         visitor);
   }
@@ -1767,7 +2309,12 @@
   DisableWeakRefAccessCallback dwrac(this);
   // Process the thread local mark stacks one last time after switching to the shared mark stack
   // mode and disable weak ref accesses.
-  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
+  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
+                               &dwrac,
+                               [this] (mirror::Object* ref)
+                                   REQUIRES_SHARED(Locks::mutator_lock_) {
+                                 ProcessMarkStackRef(ref);
+                               });
   if (kVerboseMode) {
     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   }
@@ -1829,9 +2376,9 @@
 }
 
 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Only sweep objects on the live stack.
-    SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+    SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
   } else {
     {
       TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
@@ -1863,7 +2410,7 @@
 // Copied and adapted from MarkSweep::SweepArray.
 void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
   CheckEmptyMarkStack();
   TimingLogger::ScopedTiming t("SweepArray", GetTimings());
   Thread* self = Thread::Current();
@@ -2013,9 +2560,9 @@
     const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
     const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
     const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
-    uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst) + bytes_moved_gc_thread_;
+    uint64_t to_bytes = bytes_moved_.load(std::memory_order_relaxed) + bytes_moved_gc_thread_;
     cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
-    uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst) + objects_moved_gc_thread_;
+    uint64_t to_objects = objects_moved_.load(std::memory_order_relaxed) + objects_moved_gc_thread_;
     cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
     if (kEnableFromSpaceAccountingCheck) {
       CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
@@ -2023,12 +2570,17 @@
     }
     CHECK_LE(to_objects, from_objects);
     CHECK_LE(to_bytes, from_bytes);
+    if (from_bytes > 0) {
+      copied_live_bytes_ratio_sum_ += static_cast<float>(to_bytes) / from_bytes;
+      gc_count_++;
+    }
+
     // Cleared bytes and objects, populated by the call to RegionSpace::ClearFromSpace below.
     uint64_t cleared_bytes;
     uint64_t cleared_objects;
     {
       TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
-      region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+      region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
       // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
       // RegionSpace::ClearFromSpace may clear empty unevac regions.
       CHECK_GE(cleared_bytes, from_bytes);
@@ -2047,18 +2599,21 @@
                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                 << " to_space size=" << region_space_->ToSpaceSize();
       LOG(INFO) << "(before) num_bytes_allocated="
-                << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
+                << heap_->num_bytes_allocated_.load();
     }
     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
     if (kVerboseMode) {
       LOG(INFO) << "(after) num_bytes_allocated="
-                << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
+                << heap_->num_bytes_allocated_.load();
     }
+
+    float reclaimed_bytes_ratio = static_cast<float>(freed_bytes) / num_bytes_allocated_before_gc_;
+    reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
   }
 
   {
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    Sweep(/* swap_bitmaps */ false);
+    Sweep(/* swap_bitmaps= */ false);
     SwapBitmaps();
     heap_->UnBindBitmaps();
 
@@ -2069,6 +2624,11 @@
 
   CheckEmptyMarkStack();
 
+  if (heap_->dump_region_info_after_gc_) {
+    LOG(INFO) << "time=" << region_space_->Time();
+    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+  }
+
   if (kVerboseMode) {
     LOG(INFO) << "GC end of ReclaimPhase";
   }
@@ -2115,7 +2675,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2169,7 +2732,7 @@
         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
         LOG(FATAL) << "Invalid reference " << ref
                    << " referenced from object " << obj << " at offset " << offset;
       }
@@ -2218,7 +2781,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2262,12 +2828,12 @@
         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
         LOG(FATAL) << "Invalid reference " << ref;
       }
     } else {
       // Check to-space invariant in non-moving space.
-      AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
+      AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
     }
   }
 }
@@ -2296,14 +2862,17 @@
       LOG(INFO) << "holder is in an immune image or the zygote space.";
     } else {
       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
-      accounting::LargeObjectBitmap* los_bitmap =
-          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
-      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
-      bool is_los = mark_bitmap == nullptr;
+      accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+      accounting::LargeObjectBitmap* los_bitmap = nullptr;
+      const bool is_los = !mark_bitmap->HasAddress(obj);
+      if (is_los) {
+        DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(obj))
+            << "obj=" << obj
+            << " LOS bit map covers the entire lower 4GB address range";
+        los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+      }
       if (!is_los && mark_bitmap->Test(obj)) {
-        LOG(INFO) << "holder is marked in the mark bit map.";
+        LOG(INFO) << "holder is marked in the non-moving space mark bit map.";
       } else if (is_los && los_bitmap->Test(obj)) {
         LOG(INFO) << "holder is marked in the los bit map.";
       } else {
@@ -2320,6 +2889,29 @@
   LOG(INFO) << "offset=" << offset.SizeValue();
 }
 
+bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) {
+  DCHECK(!region_space_->HasAddress(from_ref)) << "ref=" << from_ref;
+  DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
+  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
+    return true;
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
+    // Read the comment in IsMarkedInUnevacFromSpace()
+    accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+    accounting::LargeObjectBitmap* los_bitmap = nullptr;
+    const bool is_los = !mark_bitmap->HasAddress(from_ref);
+    if (is_los) {
+      DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(from_ref))
+          << "ref=" << from_ref
+          << " doesn't belong to non-moving space and large object space doesn't exist";
+      los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    }
+    if (is_los ? los_bitmap->Test(from_ref) : mark_bitmap->Test(from_ref)) {
+      return true;
+    }
+  }
+  return IsOnAllocStack(from_ref);
+}
+
 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                                mirror::Object* ref) {
   CHECK(ref != nullptr);
@@ -2341,62 +2933,14 @@
     }
   } else {
     // Non-moving space and large-object space (LOS) cases.
-    accounting::ContinuousSpaceBitmap* mark_bitmap =
-        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-    accounting::LargeObjectBitmap* los_bitmap =
-        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-    bool is_los = (mark_bitmap == nullptr);
-
-    bool marked_in_non_moving_space_or_los =
-        (kUseBakerReadBarrier
-         && kEnableGenerationalConcurrentCopyingCollection
-         && young_gen_
-         && !done_scanning_.load(std::memory_order_acquire))
-        // Don't use the mark bitmap to ensure `ref` is marked: check that the
-        // read barrier state is gray instead. This is to take into account a
-        // potential race between two read barriers on the same reference when the
-        // young-generation collector is still scanning the dirty cards.
-        //
-        // For instance consider two concurrent read barriers on the same GC root
-        // reference during the dirty-card-scanning step of a young-generation
-        // collection. Both threads would call ReadBarrier::BarrierForRoot, which
-        // would:
-        // a. mark the reference (leading to a call to
-        //    ConcurrentCopying::MarkNonMoving); then
-        // b. check the to-space invariant (leading to a call this
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace -- this
-        //    method).
-        //
-        // In this situation, the following race could happen:
-        // 1. Thread A successfully changes `ref`'s read barrier state from
-        //    non-gray (white) to gray (with AtomicSetReadBarrierState) in
-        //    ConcurrentCopying::MarkNonMoving, then gets preempted.
-        // 2. Thread B also tries to change `ref`'s read barrier state with
-        //    AtomicSetReadBarrierState from non-gray to gray in
-        //    ConcurrentCopying::MarkNonMoving, but fails, as Thread A already
-        //    changed it.
-        // 3. Because Thread B failed the previous CAS, it does *not* set the
-        //    bit in the mark bitmap for `ref`.
-        // 4. Thread B checks the to-space invariant and calls
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace: the bit
-        //    is not set in the mark bitmap for `ref`; checking that this bit is
-        //    set to check the to-space invariant is therefore not a reliable
-        //    test.
-        // 5. (Note that eventually, Thread A will resume its execution and set
-        //    the bit for `ref` in the mark bitmap.)
-        ? (ref->GetReadBarrierState() == ReadBarrier::GrayState())
-        // It is safe to use the heap mark bitmap otherwise.
-        : (!is_los && mark_bitmap->Test(ref)) || (is_los && los_bitmap->Test(ref));
-
     // If `ref` is on the allocation stack, then it may not be
     // marked live, but considered marked/alive (but not
     // necessarily on the live stack).
-    CHECK(marked_in_non_moving_space_or_los || IsOnAllocStack(ref))
+    CHECK(IsMarkedInNonMovingSpace(ref))
         << "Unmarked ref that's not on the allocation stack."
         << " obj=" << obj
         << " ref=" << ref
         << " rb_state=" << ref->GetReadBarrierState()
-        << " is_los=" << std::boolalpha << is_los << std::noboolalpha
         << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
         << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
         << " done_scanning="
@@ -2412,7 +2956,7 @@
   explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
       : collector_(collector), thread_(thread) {
     // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-    DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+    DCHECK(!kNoUnEvac || collector_->use_generational_cc_);
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -2438,7 +2982,7 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
+    collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
   }
 
  private:
@@ -2449,7 +2993,7 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     // Avoid all read barriers during visit references to help performance.
     // Don't do this in transaction mode because we may read the old value of an field which may
@@ -2460,7 +3004,7 @@
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
   // Disable the read barrier for a performance reason.
-  to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+  to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
@@ -2470,14 +3014,14 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   mirror::Object* ref = obj->GetFieldObject<
       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
-  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
+  mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
       thread_running_gc_,
       ref,
-      /*holder*/ obj,
+      /*holder=*/ obj,
       offset);
   if (to_ref == ref) {
     return;
@@ -2551,7 +3095,7 @@
     mirror::CompressedReference<mirror::Object>* const root = roots[i];
     if (!root->IsNull()) {
       // kGrayImmuneObject is true because this is used for the thread flip.
-      MarkRoot</*kGrayImmuneObject*/true>(self, root);
+      MarkRoot</*kGrayImmuneObject=*/true>(self, root);
     }
   }
 }
@@ -2611,15 +3155,15 @@
     if (ReadBarrier::kEnableToSpaceInvariantChecks) {
       AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
     }
-    CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
+    CHECK_EQ(byte_size, java_lang_Object_->GetObjectSize<kVerifyNone>());
     dummy_obj->SetClass(java_lang_Object_);
     CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
   } else {
     // Use an int array.
     dummy_obj->SetClass(int_array_class);
-    CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
+    CHECK(dummy_obj->IsArrayInstance<kVerifyNone>());
     int32_t length = (byte_size - data_offset) / component_size;
-    mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
+    mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone>();
     dummy_arr->SetLength(length);
     CHECK_EQ(dummy_arr->GetLength(), length)
         << "byte_size=" << byte_size << " length=" << length
@@ -2700,7 +3244,7 @@
   if (UNLIKELY(klass == nullptr)) {
     // Remove memory protection from the region space and log debugging information.
     region_space_->Unprotect();
-    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
   }
   // There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
   // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
@@ -2714,7 +3258,7 @@
   size_t bytes_allocated = 0U;
   size_t dummy;
   bool fall_back_to_non_moving = false;
-  mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
+  mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
   bytes_allocated = region_space_bytes_allocated;
   if (LIKELY(to_ref != nullptr)) {
@@ -2729,17 +3273,17 @@
         region_space_->RecordAlloc(to_ref);
       }
       bytes_allocated = region_space_alloc_size;
-      heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
-      to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
-      to_space_objects_skipped_.fetch_sub(1, std::memory_order_seq_cst);
+      heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
+      to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
+      to_space_objects_skipped_.fetch_sub(1, std::memory_order_relaxed);
     } else {
       // Fall back to the non-moving space.
       fall_back_to_non_moving = true;
       if (kVerboseMode) {
         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
-                  << to_space_bytes_skipped_.load(std::memory_order_seq_cst)
+                  << to_space_bytes_skipped_.load(std::memory_order_relaxed)
                   << " skipped_objects="
-                  << to_space_objects_skipped_.load(std::memory_order_seq_cst);
+                  << to_space_objects_skipped_.load(std::memory_order_relaxed);
       }
       to_ref = heap_->non_moving_space_->Alloc(self, obj_size,
                                                &non_moving_space_bytes_allocated, nullptr, &dummy);
@@ -2750,12 +3294,6 @@
         LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
       }
       bytes_allocated = non_moving_space_bytes_allocated;
-      // Mark it in the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-      CHECK(mark_bitmap != nullptr);
-      bool previously_marked_in_bitmap = mark_bitmap->AtomicTestAndSet(to_ref);
-      CHECK(!previously_marked_in_bitmap);
     }
   }
   DCHECK(to_ref != nullptr);
@@ -2788,12 +3326,12 @@
         DCHECK(region_space_->IsInToSpace(to_ref));
         if (bytes_allocated > space::RegionSpace::kRegionSize) {
           // Free the large alloc.
-          region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
+          region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
         } else {
           // Record the lost copy for later reuse.
-          heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
-          to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
-          to_space_objects_skipped_.fetch_add(1, std::memory_order_seq_cst);
+          heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
+          to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_relaxed);
+          to_space_objects_skipped_.fetch_add(1, std::memory_order_relaxed);
           MutexLock mu(self, skipped_blocks_lock_);
           skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                     reinterpret_cast<uint8_t*>(to_ref)));
@@ -2802,10 +3340,6 @@
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
         // Free the non-moving-space chunk.
-        accounting::ContinuousSpaceBitmap* mark_bitmap =
-            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-        CHECK(mark_bitmap != nullptr);
-        CHECK(mark_bitmap->Clear(to_ref));
         heap_->non_moving_space_->Free(self, to_ref);
       }
 
@@ -2854,6 +3388,14 @@
       } else {
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
+        if (!use_generational_cc_ || !young_gen_) {
+          // Mark it in the live bitmap.
+          CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
+        }
+        if (!kUseBakerReadBarrier) {
+          // Mark it in the mark bitmap.
+          CHECK(!heap_->non_moving_space_->GetMarkBitmap()->AtomicTestAndSet(to_ref));
+        }
       }
       if (kUseBakerReadBarrier) {
         DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
@@ -2898,34 +3440,11 @@
       to_ref = from_ref;
     } else {
       // Non-immune non-moving space. Use the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
-      bool is_los = mark_bitmap == nullptr;
-      if (!is_los && mark_bitmap->Test(from_ref)) {
+      if (IsMarkedInNonMovingSpace(from_ref)) {
         // Already marked.
         to_ref = from_ref;
       } else {
-        accounting::LargeObjectBitmap* los_bitmap =
-            heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
-        // We may not have a large object space for dex2oat, don't assume it exists.
-        if (los_bitmap == nullptr) {
-          CHECK(heap_->GetLargeObjectsSpace() == nullptr)
-              << "LOS bitmap covers the entire address range " << from_ref
-              << " " << heap_->DumpSpaces();
-        }
-        if (los_bitmap != nullptr && is_los && los_bitmap->Test(from_ref)) {
-          // Already marked in LOS.
-          to_ref = from_ref;
-        } else {
-          // Not marked.
-          if (IsOnAllocStack(from_ref)) {
-            // If on the allocation stack, it's considered marked.
-            to_ref = from_ref;
-          } else {
-            // Not marked.
-            to_ref = nullptr;
-          }
-        }
+        to_ref = nullptr;
       }
     }
   }
@@ -2947,12 +3466,25 @@
   DCHECK(!region_space_->HasAddress(ref)) << ref;
   DCHECK(!immune_spaces_.ContainsObject(ref));
   // Use the mark bitmap.
-  accounting::ContinuousSpaceBitmap* mark_bitmap =
-      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-  accounting::LargeObjectBitmap* los_bitmap =
-      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-  bool is_los = mark_bitmap == nullptr;
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+  accounting::LargeObjectBitmap* los_bitmap = nullptr;
+  const bool is_los = !mark_bitmap->HasAddress(ref);
+  if (is_los) {
+    if (!IsAligned<kPageSize>(ref)) {
+      // Ref is a large object that is not aligned, it must be heap
+      // corruption. Remove memory protection and dump data before
+      // AtomicSetReadBarrierState since it will fault if the address is not
+      // valid.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
+    }
+    DCHECK(heap_->GetLargeObjectsSpace())
+        << "ref=" << ref
+        << " doesn't belong to non-moving space and large object space doesn't exist";
+    los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    DCHECK(los_bitmap->HasAddress(ref));
+  }
+  if (use_generational_cc_) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
     // Not done scanning, use AtomicSetReadBarrierPointer.
@@ -2960,15 +3492,17 @@
       // Since the mark bitmap is still filled in from last GC, we can not use that or else the
       // mutator may see references to the from space. Instead, use the Baker pointer itself as
       // the mark bit.
-      if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
+      //
+      // We need to avoid marking objects that are on allocation stack as that will lead to a
+      // situation (after this GC cycle is finished) where some object(s) are on both allocation
+      // stack and live bitmap. This leads to visiting the same object(s) twice during a heapdump
+      // (b/117426281).
+      if (!IsOnAllocStack(ref) &&
+          ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
         // TODO: We don't actually need to scan this object later, we just need to clear the gray
         // bit.
-        // Also make sure the object is marked.
-        if (is_los) {
-          los_bitmap->AtomicTestAndSet(ref);
-        } else {
-          mark_bitmap->AtomicTestAndSet(ref);
-        }
+        // We don't need to mark newly allocated objects (those in allocation stack) as they can
+        // only point to to-space objects. Also, they are considered live till the next GC cycle.
         PushOntoMarkStack(self, ref);
       }
       return ref;
@@ -2978,65 +3512,34 @@
     // Already marked.
   } else if (is_los && los_bitmap->Test(ref)) {
     // Already marked in LOS.
-  } else {
-    // Not marked.
-    if (IsOnAllocStack(ref)) {
-      // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
-      // Objects on the allocation stack need not be marked.
-      if (!is_los) {
-        DCHECK(!mark_bitmap->Test(ref));
-      } else {
-        DCHECK(!los_bitmap->Test(ref));
-      }
-      if (kUseBakerReadBarrier) {
-        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
-      }
+  } else if (IsOnAllocStack(ref)) {
+    // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
+    // Objects on the allocation stack need not be marked.
+    if (!is_los) {
+      DCHECK(!mark_bitmap->Test(ref));
     } else {
-      // For the baker-style RB, we need to handle 'false-gray' cases. See the
-      // kRegionTypeUnevacFromSpace-case comment in Mark().
+      DCHECK(!los_bitmap->Test(ref));
+    }
+    if (kUseBakerReadBarrier) {
+      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
+    }
+  } else {
+    // Not marked nor on the allocation stack. Try to mark it.
+    // This may or may not succeed, which is ok.
+    bool success = false;
+    if (kUseBakerReadBarrier) {
+      success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
+                                               ReadBarrier::GrayState());
+    } else {
+      success = is_los ?
+          !los_bitmap->AtomicTestAndSet(ref) :
+          !mark_bitmap->AtomicTestAndSet(ref);
+    }
+    if (success) {
       if (kUseBakerReadBarrier) {
-        // Test the bitmap first to reduce the chance of false gray cases.
-        if ((!is_los && mark_bitmap->Test(ref)) ||
-            (is_los && los_bitmap->Test(ref))) {
-          return ref;
-        }
+        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
       }
-      if (is_los && !IsAligned<kPageSize>(ref)) {
-        // Ref is a large object that is not aligned, it must be heap
-        // corruption. Remove memory protection and dump data before
-        // AtomicSetReadBarrierState since it will fault if the address is not
-        // valid.
-        region_space_->Unprotect();
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
-      }
-      // Not marked nor on the allocation stack. Try to mark it.
-      // This may or may not succeed, which is ok.
-      bool cas_success = false;
-      if (kUseBakerReadBarrier) {
-        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
-                                                     ReadBarrier::GrayState());
-      }
-      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked in LOS.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else {
-        // Newly marked.
-        if (kUseBakerReadBarrier) {
-          DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
-        }
-        PushOntoMarkStack(self, ref);
-      }
+      PushOntoMarkStack(self, ref);
     }
   }
   return ref;
@@ -3050,10 +3553,13 @@
   }
   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
   // positives.
-  if (!kEnableGenerationalConcurrentCopyingCollection && !kVerifyNoMissingCardMarks) {
+  if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
+  } else if (use_generational_cc_ && !young_gen_) {
+    region_space_inter_region_bitmap_->Clear();
+    non_moving_space_inter_region_bitmap_->Clear();
   }
   {
     MutexLock mu(self, skipped_blocks_lock_);
@@ -3121,7 +3627,7 @@
       } while (!field->CasWeakRelaxed(from_ref, to_ref));
     } else {
       // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
-      field->Assign</* kIsVolatile */ true>(to_ref);
+      field->Assign</* kIsVolatile= */ true>(to_ref);
     }
   }
   return true;
@@ -3141,7 +3647,7 @@
   // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+      /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
 }
 
 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
@@ -3159,7 +3665,8 @@
   ScopedTrace tr(__FUNCTION__);
   const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
   mirror::Object* ret =
-      Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
+      Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+                                                                                     from_ref);
   if (measure_read_barrier_slow_path_) {
     rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
   }
@@ -3168,6 +3675,7 @@
 
 void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
   GarbageCollector::DumpPerformanceInfo(os);
+  size_t num_gc_cycles = GetCumulativeTimings().GetIterations();
   MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
   if (rb_slow_path_time_histogram_.SampleSize() > 0) {
     Histogram<uint64_t>::CumulativeData cumulative_data;
@@ -3180,6 +3688,15 @@
   if (rb_slow_path_count_gc_total_ > 0) {
     os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
   }
+
+  os << "Average " << (young_gen_ ? "minor" : "major") << " GC reclaim bytes ratio "
+     << (reclaimed_bytes_ratio_sum_ / num_gc_cycles) << " over " << num_gc_cycles
+     << " GC cycles\n";
+
+  os << "Average " << (young_gen_ ? "minor" : "major") << " GC copied live bytes ratio "
+     << (copied_live_bytes_ratio_sum_ / gc_count_) << " over " << gc_count_
+     << " " << (young_gen_ ? "minor" : "major") << " GCs\n";
+
   os << "Cumulative bytes moved "
      << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n";
   os << "Cumulative objects moved "
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 1a7464a..124713c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -17,22 +17,22 @@
 #ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
 #define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
 
-#include "barrier.h"
-#include "base/safe_map.h"
 #include "garbage_collector.h"
 #include "immune_spaces.h"
-#include "jni.h"
-#include "mirror/object_reference.h"
 #include "offsets.h"
 
-#include <unordered_map>
+#include <map>
+#include <memory>
 #include <vector>
 
 namespace art {
+class Barrier;
 class Closure;
 class RootInfo;
 
 namespace mirror {
+template<class MirrorType> class CompressedReference;
+template<class MirrorType> class HeapReference;
 class Object;
 }  // namespace mirror
 
@@ -65,10 +65,11 @@
   // pages.
   static constexpr bool kGrayDirtyImmuneObjects = true;
 
-  explicit ConcurrentCopying(Heap* heap,
-                             bool young_gen,
-                             const std::string& name_prefix = "",
-                             bool measure_read_barrier_slow_path = false);
+  ConcurrentCopying(Heap* heap,
+                    bool young_gen,
+                    bool use_generational_cc,
+                    const std::string& name_prefix = "",
+                    bool measure_read_barrier_slow_path = false);
   ~ConcurrentCopying();
 
   void RunPhases() override
@@ -79,6 +80,8 @@
   void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
   void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void FinishPhase() REQUIRES(!mark_stack_lock_,
@@ -88,7 +91,7 @@
   void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
   GcType GetGcType() const override {
-    return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+    return (use_generational_cc_ && young_gen_)
         ? kGcTypeSticky
         : kGcTypePartial;
   }
@@ -96,6 +99,9 @@
     return kCollectorTypeCC;
   }
   void RevokeAllThreadLocalBuffers() override;
+  // Creates inter-region ref bitmaps for region-space and non-moving-space.
+  // Gets called in Heap construction after the two spaces are created.
+  void CreateInterRegionRefBitmaps();
   void SetRegionSpace(space::RegionSpace* region_space) {
     DCHECK(region_space != nullptr);
     region_space_ = region_space;
@@ -161,6 +167,13 @@
   template <bool kNoUnEvac>
   void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
+  // Scan the reference fields of object 'obj' in the dirty cards during
+  // card-table scan. In addition to visiting the references, it also sets the
+  // read-barrier state to gray for Reference-type objects to ensure that
+  // GetReferent() called on these objects calls the read-barrier on the referent.
+  template <bool kNoUnEvac>
+  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   // Process a field.
   template <bool kNoUnEvac>
   void Process(mirror::Object* obj, MemberOffset offset)
@@ -198,7 +211,10 @@
   void VerifyNoMissingCardMarks()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
-  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
+  template <typename Processor>
+  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+                                      Closure* checkpoint_callback,
+                                      const Processor& processor)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -218,6 +234,8 @@
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                    bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -283,11 +301,6 @@
   ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                 mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
-  void PushOntoFalseGrayStack(Thread* const self, mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
-  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
   void ScanImmuneObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
@@ -298,10 +311,32 @@
   // Set the read barrier mark entrypoints to non-null.
   void ActivateReadBarrierEntrypoints();
 
+  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  template <bool kAtomic = false>
+  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
   std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Generational CC collection is currently only
+  // compatible with Baker read barriers. Set in Heap constructor.
+  const bool use_generational_cc_;
+
+  // Generational "sticky", only trace through dirty objects in region space.
+  const bool young_gen_;
+
+  // If true, the GC thread is done scanning marked objects on dirty and aged
+  // card (see ConcurrentCopying::CopyingPhase).
+  Atomic<bool> done_scanning_;
+
   // The read-barrier mark-bit stack. Stores object references whose
   // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
   // so that this bit can be reset at the end of the collection in
@@ -315,7 +350,6 @@
   // (see use case in ConcurrentCopying::MarkFromReadBarrier).
   bool rb_mark_bit_stack_full_;
 
-  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
   Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::vector<accounting::ObjectStack*> revoked_mark_stacks_
       GUARDED_BY(mark_stack_lock_);
@@ -359,11 +393,26 @@
   Atomic<uint64_t> cumulative_bytes_moved_;
   Atomic<uint64_t> cumulative_objects_moved_;
 
-  // Generational "sticky", only trace through dirty objects in region space.
-  const bool young_gen_;
-  // If true, the GC thread is done scanning marked objects on dirty and aged
-  // card (see ConcurrentCopying::MarkingPhase).
-  Atomic<bool> done_scanning_;
+  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
+  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
+  // thread). However, at present, DumpPerformanceInfo is only called when the
+  // runtime shuts down, so no concurrent access. The same reasoning goes for
+  // gc_count_ and reclaimed_bytes_ratio_sum_
+
+  // The sum of of all copied live bytes ratio (to_bytes/from_bytes)
+  float copied_live_bytes_ratio_sum_;
+  // The number of GC counts, used to calculate the average above. (It doesn't
+  // include GC where from_bytes is zero, IOW, from-space is empty, which is
+  // possible for minor GC if all allocated objects are in non-moving
+  // space.)
+  size_t gc_count_;
+  // Bit is set if the corresponding object has inter-region references that
+  // were found during the marking phase of two-phase full-heap GC cycle.
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_;
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_;
+
+  // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
+  float reclaimed_bytes_ratio_sum_;
 
   // The skipped blocks are memory blocks/chucks that were copies of
   // objects that were unused due to lost races (cas failures) at
@@ -404,6 +453,9 @@
   // ConcurrentCopying::SweepArray).
   MemMap sweep_array_free_buffer_mem_map_;
 
+  // Use signed because after_gc may be larger than before_gc.
+  int64_t num_bytes_allocated_before_gc_;
+
   class ActivateReadBarrierEntrypointsCallback;
   class ActivateReadBarrierEntrypointsCheckpoint;
   class AssertToSpaceInvariantFieldVisitor;
@@ -425,6 +477,10 @@
   class VerifyNoFromSpaceRefsFieldVisitor;
   class VerifyNoFromSpaceRefsVisitor;
   class VerifyNoMissingCardMarkVisitor;
+  class ImmuneSpaceCaptureRefsVisitor;
+  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
+  class CaptureThreadRootsForMarkingAndCheckpoint;
+  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
 };
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 5e3692e..b8ad624 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -77,8 +77,9 @@
 
 void GarbageCollector::ResetCumulativeStatistics() {
   cumulative_timings_.Reset();
-  total_time_ns_ = 0;
-  total_freed_objects_ = 0;
+  total_thread_cpu_time_ns_ = 0u;
+  total_time_ns_ = 0u;
+  total_freed_objects_ = 0u;
   total_freed_bytes_ = 0;
   MutexLock mu(Thread::Current(), pause_histogram_lock_);
   pause_histogram_.Reset();
@@ -88,12 +89,15 @@
   ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
   Thread* self = Thread::Current();
   uint64_t start_time = NanoTime();
+  uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
+  GetHeap()->CalculatePreGcWeightedAllocatedBytes();
   Iteration* current_iteration = GetCurrentIteration();
   current_iteration->Reset(gc_cause, clear_soft_references);
   // Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't
   // change in the middle of a GC.
   is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
   RunPhases();  // Run all the GC phases.
+  GetHeap()->CalculatePostGcWeightedAllocatedBytes();
   // Add the current timings to the cumulative timings.
   cumulative_timings_.AddLogger(*GetTimings());
   // Update cumulative statistics with how many bytes the GC iteration freed.
@@ -102,6 +106,8 @@
   total_freed_bytes_ += current_iteration->GetFreedBytes() +
       current_iteration->GetFreedLargeObjectBytes();
   uint64_t end_time = NanoTime();
+  uint64_t thread_cpu_end_time = ThreadCpuNanoTime();
+  total_thread_cpu_time_ns_ += thread_cpu_end_time - thread_cpu_start_time;
   current_iteration->SetDurationNs(end_time - start_time);
   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
     // The entire GC was paused, clear the fake pauses which might be in the pause times and add
@@ -159,8 +165,9 @@
     pause_histogram_.Reset();
   }
   cumulative_timings_.Reset();
-  total_time_ns_ = 0;
-  total_freed_objects_ = 0;
+  total_thread_cpu_time_ns_ = 0u;
+  total_time_ns_ = 0u;
+  total_freed_objects_ = 0u;
   total_freed_bytes_ = 0;
 }
 
@@ -229,12 +236,16 @@
       pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
     }
   }
+  double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
   os << GetName() << " total time: " << PrettyDuration(total_ns)
      << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
      << GetName() << " freed: " << freed_objects
      << " objects with total size " << PrettySize(freed_bytes) << "\n"
      << GetName() << " throughput: " << freed_objects / seconds << "/s / "
-     << PrettySize(freed_bytes / seconds) << "/s\n";
+     << PrettySize(freed_bytes / seconds) << "/s"
+     << "  per cpu-time: "
+     << static_cast<uint64_t>(freed_bytes / cpu_seconds) << "/s / "
+     << PrettySize(freed_bytes / cpu_seconds) << "/s\n";
 }
 
 }  // namespace collector
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index f722e8d..2857881 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -81,6 +81,9 @@
   void SwapBitmaps()
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  uint64_t GetTotalCpuTime() const {
+    return total_thread_cpu_time_ns_;
+  }
   uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
   int64_t GetTotalFreedBytes() const {
     return total_freed_bytes_;
@@ -146,6 +149,7 @@
   std::string name_;
   // Cumulative statistics.
   Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
+  uint64_t total_thread_cpu_time_ns_;
   uint64_t total_time_ns_;
   uint64_t total_freed_objects_;
   int64_t total_freed_bytes_;
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index c9ac435..80ee44c 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -18,7 +18,6 @@
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3b59618..3c20e51 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -57,7 +57,7 @@
       if (image_oat_file != nullptr) {
         intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
                                      reinterpret_cast<uintptr_t>(image_oat_file->End()),
-                                     /*image*/false));
+                                     /*image=*/false));
       }
     }
     intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
diff --git a/runtime/gc/collector/immune_spaces.h b/runtime/gc/collector/immune_spaces.h
index 72cb60d..5a8441a 100644
--- a/runtime/gc/collector/immune_spaces.h
+++ b/runtime/gc/collector/immune_spaces.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
 #include "immune_region.h"
 
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 7bd87bd..b0d09ba 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -32,7 +32,7 @@
 
 class DummyOatFile : public OatFile {
  public:
-  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
     begin_ = begin;
     end_ = end;
   }
@@ -45,7 +45,7 @@
                   std::unique_ptr<DummyOatFile>&& oat_file,
                   MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
-                   /*image_location*/"",
+                   /*image_location=*/"",
                    std::move(map),
                    std::move(live_bitmap),
                    map.End()),
@@ -78,18 +78,20 @@
   }
 
   // Create an image space, the oat file is optional.
-  DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
-                                    size_t image_size,
-                                    uint8_t* oat_begin,
-                                    size_t oat_size) {
+  DummyImageSpace* CreateImageSpace(size_t image_size,
+                                    size_t oat_size,
+                                    MemMap* image_reservation,
+                                    MemMap* oat_reservation) {
+    DCHECK(image_reservation != nullptr);
+    DCHECK(oat_reservation != nullptr);
     std::string error_str;
-    MemMap map = MemMap::MapAnonymous("DummyImageSpace",
-                                      image_begin,
-                                      image_size,
-                                      PROT_READ | PROT_WRITE,
-                                      /*low_4gb*/true,
-                                      &error_str);
-    if (!map.IsValid()) {
+    MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+                                            image_size,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            /*reservation=*/ image_reservation,
+                                            &error_str);
+    if (!image_map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
@@ -97,10 +99,10 @@
     std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
     live_bitmaps_.pop_back();
     MemMap oat_map = MemMap::MapAnonymous("OatMap",
-                                          oat_begin,
                                           oat_size,
                                           PROT_READ | PROT_WRITE,
-                                          /*low_4gb*/true,
+                                          /*low_4gb=*/ true,
+                                          /*reservation=*/ oat_reservation,
                                           &error_str);
     if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -109,49 +111,28 @@
     std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
-    new (map.Begin()) ImageHeader(
-        /*image_begin*/PointerToLowMemUInt32(map.Begin()),
-        /*image_size*/map.Size(),
+    new (image_map.Begin()) ImageHeader(
+        /*image_reservation_size=*/ image_size,
+        /*component_count=*/ 1u,
+        /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
+        /*image_size=*/ image_size,
         sections,
-        /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
-        /*oat_checksum*/0u,
+        /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
+        /*oat_checksum=*/ 0u,
         // The oat file data in the header is always right after the image space.
-        /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
-        /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
-        /*boot_image_begin*/0u,
-        /*boot_image_size*/0u,
-        /*boot_oat_begin*/0u,
-        /*boot_oat_size*/0u,
-        /*pointer_size*/sizeof(void*),
-        /*compile_pic*/false,
-        /*is_pic*/false,
-        ImageHeader::kStorageModeUncompressed,
-        /*storage_size*/0u);
-    return new DummyImageSpace(std::move(map),
+        /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+        /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+        /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+        /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+        /*boot_image_begin=*/ 0u,
+        /*boot_image_size=*/ 0u,
+        /*pointer_size=*/ sizeof(void*));
+    return new DummyImageSpace(std::move(image_map),
                                std::move(live_bitmap),
                                std::move(oat_file),
                                std::move(oat_map));
   }
 
-  // Does not reserve the memory, the caller needs to be sure no other threads will map at the
-  // returned address.
-  static uint8_t* GetContinuousMemoryRegion(size_t size) {
-    std::string error_str;
-    MemMap map = MemMap::MapAnonymous("reserve",
-                                      /* addr */ nullptr,
-                                      size,
-                                      PROT_READ | PROT_WRITE,
-                                      /*low_4gb*/ true,
-                                      &error_str);
-    if (!map.IsValid()) {
-      LOG(ERROR) << "Failed to allocate memory region " << error_str;
-      return nullptr;
-    }
-    return map.Begin();
-  }
-
  private:
   // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
   // them to randomly get placed somewhere where we want an image space.
@@ -165,7 +146,7 @@
                         space::kGcRetentionPolicyNeverCollect,
                         begin,
                         end,
-                        /*limit*/end) {}
+                        /*limit=*/end) {}
 
   space::SpaceType GetType() const override {
     return space::kSpaceTypeMallocSpace;
@@ -208,13 +189,25 @@
   constexpr size_t kImageOatSize = 321 * kPageSize;
   constexpr size_t kOtherSpaceSize = 100 * kPageSize;
 
-  uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+  std::string error_str;
+  MemMap reservation = MemMap::MapAnonymous("reserve",
+                                            kImageSize + kImageOatSize + kOtherSpaceSize,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
-                                                                kImageSize,
-                                                                memory + kImageSize,
-                                                                kImageOatSize));
+  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
+                                                                kImageOatSize,
+                                                                &image_reservation,
+                                                                &reservation));
   ASSERT_TRUE(image_space != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+
   const ImageHeader& image_header = image_space->GetImageHeader();
   DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
 
@@ -259,36 +252,44 @@
   constexpr size_t kImage3OatSize = kPageSize;
   constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
   constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
-  uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
-  uint8_t* space1_begin = memory;
-  memory += kImage1Size;
-  uint8_t* space2_begin = memory;
-  memory += kImage2Size;
-  uint8_t* space1_oat_begin = memory;
-  memory += kImage1OatSize;
-  uint8_t* space2_oat_begin = memory;
-  memory += kImage2OatSize;
-  uint8_t* space3_begin = memory;
+  std::string error_str;
+  MemMap reservation = MemMap::MapAnonymous("reserve",
+                                            kMemorySize,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
-                                                           kImage1Size,
-                                                           space1_oat_begin,
-                                                           kImage1OatSize));
+  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
+                                                           kImage1OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space1 != nullptr);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-
-  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
-                                                           kImage2Size,
-                                                           space2_oat_begin,
-                                                           kImage2OatSize));
+  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
+                                                           kImage2OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space2 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
   // Finally put a 3rd image space.
-  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
-                                                           kImage3Size,
-                                                           space3_begin + kImage3Size,
-                                                           kImage3OatSize));
+  image_reservation = reservation.TakeReservedMemory(kImage3Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
+                                                           kImage3OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space3 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_FALSE(reservation.IsValid());
 
   // Check that we do not include the oat if there is no space after.
   ImmuneSpaces spaces;
@@ -325,12 +326,29 @@
   constexpr size_t kGuardSize = kPageSize;
   constexpr size_t kImage4Size = kImageBytes - kPageSize;
   constexpr size_t kImage4OatSize = kPageSize;
-  uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
-  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
-                                                           kImage4Size,
-                                                           memory2 + kGuardSize + kImage4Size,
-                                                           kImage4OatSize));
+
+  reservation = MemMap::MapAnonymous("reserve",
+                                     kImage4Size + kImage4OatSize + kGuardSize * 2,
+                                     PROT_READ | PROT_WRITE,
+                                     /*low_4gb=*/ true,
+                                     &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
+  ASSERT_TRUE(guard.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  guard.Reset();  // Release the guard memory.
+  image_reservation = reservation.TakeReservedMemory(kImage4Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
+                                                           kImage4OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space4 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  ASSERT_EQ(reservation.Size(), kGuardSize);
+  reservation.Reset();  // Release the guard memory.
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
@@ -348,12 +366,28 @@
   // Layout:  [guard page][image][oat][guard page]
   constexpr size_t kImage5Size = kImageBytes + kPageSize;
   constexpr size_t kImage5OatSize = kPageSize;
-  uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
-  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
-                                                           kImage5Size,
-                                                           memory3 + kGuardSize + kImage5Size,
-                                                           kImage5OatSize));
+  reservation = MemMap::MapAnonymous("reserve",
+                                     kImage5Size + kImage5OatSize + kGuardSize * 2,
+                                     PROT_READ | PROT_WRITE,
+                                     /*low_4gb=*/ true,
+                                     &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  guard = reservation.TakeReservedMemory(kGuardSize);
+  ASSERT_TRUE(guard.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  guard.Reset();  // Release the guard memory.
+  image_reservation = reservation.TakeReservedMemory(kImage5Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
+                                                           kImage5OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space5 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  ASSERT_EQ(reservation.Size(), kGuardSize);
+  reservation.Reset();  // Release the guard memory.
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 23b2719..9e5cb9c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,9 @@
   std::string error_msg;
   sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
       "mark sweep sweep array free buffer",
-      /* addr */ nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
       PROT_READ | PROT_WRITE,
-      /* low_4gb */ false,
+      /*low_4gb=*/ false,
       &error_msg);
   CHECK(sweep_array_free_buffer_mem_map_.IsValid())
       << "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -283,9 +282,9 @@
   // cards (during the call to Heap::ProcessCard) are not reordered
   // *after* marking actually starts?
   heap_->ProcessCards(GetTimings(),
-                      /* use_rem_sets */ false,
-                      /* process_alloc_space_cards */ true,
-                      /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
+                      /* use_rem_sets= */ false,
+                      /* process_alloc_space_cards= */ true,
+                      /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   MarkRoots(self);
   MarkReachableObjects();
@@ -446,7 +445,7 @@
                      !large_object_space->Contains(obj)))) {
       // Lowest priority logging first:
       PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
       // Buffer the output in the string stream since it is more important than the stack traces
       // and we want it to have log priority. The stack traces are printed from Runtime::Abort
       // which is called from LOG(FATAL) but before the abort message.
@@ -789,12 +788,12 @@
     mark_stack_[mark_stack_pos_++].Assign(obj);
   }
 
-  virtual void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
   // Scans all of the objects
-  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
+  void Run(Thread* self ATTRIBUTE_UNUSED) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ScanObjectParallelVisitor visitor(this);
@@ -852,11 +851,11 @@
   const uint8_t minimum_age_;
   const bool clear_card_;
 
-  virtual void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
-  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
     ScanObjectParallelVisitor visitor(this);
     accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
     size_t cards_scanned = clear_card_
@@ -1009,12 +1008,12 @@
   const uintptr_t begin_;
   const uintptr_t end_;
 
-  virtual void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
   // Scans all of the objects
-  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
     ScanObjectParallelVisitor visitor(this);
     bitmap_->VisitMarkedRange(begin_, end_, visitor);
     // Finish by emptying our local mark stack.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8cd484f..15e0711 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -251,6 +251,7 @@
     ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
     SweepSystemWeaks();
   }
+  Runtime::Current()->BroadcastForNewSystemWeaks();
   Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
   // before they are properly counted.
@@ -727,7 +728,7 @@
   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
   MarkObjectVisitor visitor(this);
   // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
-  obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
 }
 
@@ -735,7 +736,8 @@
 void SemiSpace::ProcessMarkStack() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
-  if (collect_from_space_only_) {
+  const bool collect_from_space_only = collect_from_space_only_;
+  if (collect_from_space_only) {
     // If a bump pointer space only collection (and the promotion is
     // enabled,) we delay the live-bitmap marking of promoted objects
     // from MarkObject() until this function.
@@ -747,7 +749,7 @@
   }
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
-    if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
+    if (collect_from_space_only && promo_dest_space_->HasAddress(obj)) {
       // obj has just been promoted. Mark the live bitmap for it,
       // which is delayed from MarkObject().
       DCHECK(!live_bitmap->Test(obj));
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index bb42be6..6fab371 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -20,8 +20,8 @@
 #include <memory>
 
 #include "base/atomic.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "garbage_collector.h"
 #include "gc/accounting/heap_bitmap.h"
 #include "gc_root.h"
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index ee7ac7d..8b4bac2 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -18,8 +18,8 @@
 
 #include <android-base/logging.h>
 
-#include "base/globals.h"
 #include "base/macros.h"
+#include "runtime_globals.h"
 
 #include <ostream>
 
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 791d037..1c09b5c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -22,7 +22,6 @@
 #include "allocation_listener.h"
 #include "base/quasi_atomic.h"
 #include "base/time_utils.h"
-#include "base/utils.h"
 #include "gc/accounting/atomic_stack.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
@@ -108,7 +107,8 @@
     pre_fence_visitor(obj, usable_size);
     QuasiAtomic::ThreadFenceForConstructor();
   } else {
-    // Bytes allocated that takes bulk thread-local buffer allocations into account.
+    // Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
+    // non-TLAB object allocations.
     size_t bytes_tl_bulk_allocated = 0u;
     obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                               &usable_size, &bytes_tl_bulk_allocated);
@@ -128,10 +128,10 @@
         if (!self->IsExceptionPending()) {
           // AllocObject will pick up the new allocator type, and instrumented as true is the safe
           // default.
-          return AllocObject</*kInstrumented*/true>(self,
-                                                    klass,
-                                                    byte_count,
-                                                    pre_fence_visitor);
+          return AllocObject</*kInstrumented=*/true>(self,
+                                                     klass,
+                                                     byte_count,
+                                                     pre_fence_visitor);
         }
         return nullptr;
       }
@@ -156,10 +156,10 @@
     }
     pre_fence_visitor(obj, usable_size);
     QuasiAtomic::ThreadFenceForConstructor();
-    size_t num_bytes_allocated_before =
-        num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
-    new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
     if (bytes_tl_bulk_allocated > 0) {
+      size_t num_bytes_allocated_before =
+          num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
+      new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
       // Only trace when we get an increase in the number of bytes allocated. This happens when
       // obtaining a new TLAB and isn't often enough to hurt performance according to golem.
       TraceHeapSize(new_num_bytes_allocated);
@@ -212,7 +212,9 @@
   // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
   // the allocator_type should be constant propagated.
   if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
-    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
+    // New_num_bytes_allocated is zero if we didn't update num_bytes_allocated_.
+    // That's fine.
+    CheckConcurrentGCForJava(self, new_num_bytes_allocated, &obj);
   }
   VerifyObject(obj);
   self->VerifyStack();
@@ -252,8 +254,8 @@
                                            size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated) {
-  if (allocator_type != kAllocatorTypeTLAB &&
-      allocator_type != kAllocatorTypeRegionTLAB &&
+  if (allocator_type != kAllocatorTypeRegionTLAB &&
+      allocator_type != kAllocatorTypeTLAB &&
       allocator_type != kAllocatorTypeRosAlloc &&
       UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
     return nullptr;
@@ -394,28 +396,46 @@
 inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                             size_t alloc_size,
                                             bool grow) {
-  size_t new_footprint = num_bytes_allocated_.load(std::memory_order_seq_cst) + alloc_size;
-  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
-    if (UNLIKELY(new_footprint > growth_limit_)) {
+  size_t old_target = target_footprint_.load(std::memory_order_relaxed);
+  while (true) {
+    size_t old_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+    size_t new_footprint = old_allocated + alloc_size;
+    // Tests against heap limits are inherently approximate, since multiple allocations may
+    // race, and this is not atomic with the allocation.
+    if (UNLIKELY(new_footprint <= old_target)) {
+      return false;
+    } else if (UNLIKELY(new_footprint > growth_limit_)) {
       return true;
     }
-    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
-      if (!grow) {
+    // We are between target_footprint_ and growth_limit_ .
+    if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+      return false;
+    } else {
+      if (grow) {
+        if (target_footprint_.compare_exchange_weak(/*inout ref*/old_target, new_footprint,
+                                                    std::memory_order_relaxed)) {
+          VlogHeapGrowth(old_target, new_footprint, alloc_size);
+          return false;
+        }  // else try again.
+      } else {
         return true;
       }
-      // TODO: Grow for allocation is racy, fix it.
-      VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
-      max_allowed_footprint_ = new_footprint;
     }
   }
-  return false;
 }
 
-inline void Heap::CheckConcurrentGC(Thread* self,
+inline bool Heap::ShouldConcurrentGCForJava(size_t new_num_bytes_allocated) {
+  // For a Java allocation, we only check whether the number of Java allocated bytes excceeds a
+  // threshold. By not considering native allocation here, we (a) ensure that Java heap bounds are
+  // maintained, and (b) reduce the cost of the check here.
+  return new_num_bytes_allocated >= concurrent_start_bytes_;
+}
+
+inline void Heap::CheckConcurrentGCForJava(Thread* self,
                                     size_t new_num_bytes_allocated,
                                     ObjPtr<mirror::Object>* obj) {
-  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
-    RequestConcurrentGCAndSaveObject(self, false, obj);
+  if (UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+    RequestConcurrentGCAndSaveObject(self, false /* force_full */, obj);
   }
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e76d35d..5473b52 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,9 @@
 #include "heap.h"
 
 #include <limits>
+#if defined(__BIONIC__) || defined(__GLIBC__)
+#include <malloc.h>  // For mallinfo()
+#endif
 #include <memory>
 #include <vector>
 
@@ -36,6 +39,7 @@
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
+#include "base/utils.h"
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex/dex_file-inl.h"
@@ -51,6 +55,7 @@
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/semi_space.h"
 #include "gc/collector/sticky_mark_sweep.h"
+#include "gc/racing_check.h"
 #include "gc/reference_processor.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/space/bump_pointer_space.h"
@@ -102,8 +107,9 @@
 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
-static constexpr double kStickyGcThroughputAdjustment =
-    kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0;
+static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
+  return use_generational_cc ? 0.5 : 1.0;
+}
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -172,6 +178,8 @@
            double foreground_heap_growth_multiplier,
            size_t capacity,
            size_t non_moving_space_capacity,
+           const std::vector<std::string>& boot_class_path,
+           const std::vector<std::string>& boot_class_path_locations,
            const std::string& image_file_name,
            const InstructionSet image_instruction_set,
            CollectorType foreground_collector_type,
@@ -183,7 +191,7 @@
            bool low_memory_mode,
            size_t long_pause_log_threshold,
            size_t long_gc_log_threshold,
-           bool ignore_max_footprint,
+           bool ignore_target_footprint,
            bool use_tlab,
            bool verify_pre_gc_heap,
            bool verify_pre_sweeping_heap,
@@ -194,7 +202,10 @@
            bool gc_stress_mode,
            bool measure_gc_performance,
            bool use_homogeneous_space_compaction_for_oom,
-           uint64_t min_interval_homogeneous_space_compaction_by_oom)
+           bool use_generational_cc,
+           uint64_t min_interval_homogeneous_space_compaction_by_oom,
+           bool dump_region_info_before_gc,
+           bool dump_region_info_after_gc)
     : non_moving_space_(nullptr),
       rosalloc_space_(nullptr),
       dlmalloc_space_(nullptr),
@@ -209,7 +220,12 @@
       low_memory_mode_(low_memory_mode),
       long_pause_log_threshold_(long_pause_log_threshold),
       long_gc_log_threshold_(long_gc_log_threshold),
-      ignore_max_footprint_(ignore_max_footprint),
+      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
+      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+      pre_gc_weighted_allocated_bytes_(0.0),
+      post_gc_weighted_allocated_bytes_(0.0),
+      ignore_target_footprint_(ignore_target_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
       large_object_threshold_(large_object_threshold),
@@ -222,13 +238,14 @@
       next_gc_type_(collector::kGcTypePartial),
       capacity_(capacity),
       growth_limit_(growth_limit),
-      max_allowed_footprint_(initial_size),
+      target_footprint_(initial_size),
       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       num_bytes_allocated_(0),
-      new_native_bytes_allocated_(0),
+      native_bytes_registered_(0),
       old_native_bytes_allocated_(0),
+      native_objects_notified_(0),
       num_bytes_freed_revoke_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
@@ -273,6 +290,7 @@
       pending_collector_transition_(nullptr),
       pending_heap_trim_(nullptr),
       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+      use_generational_cc_(use_generational_cc),
       running_collection_is_blocking_(false),
       blocking_gc_count_(0U),
       blocking_gc_time_(0U),
@@ -287,7 +305,9 @@
       backtrace_lock_(nullptr),
       seen_backtrace_count_(0u),
       unique_backtrace_count_(0u),
-      gc_disabled_for_shutdown_(false) {
+      gc_disabled_for_shutdown_(false),
+      dump_region_info_before_gc_(dump_region_info_before_gc),
+      dump_region_info_after_gc_(dump_region_info_after_gc) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -335,14 +355,20 @@
   // Requested begin for the alloc space, to follow the mapped image and oat files
   uint8_t* request_begin = nullptr;
   // Calculate the extra space required after the boot image, see allocations below.
-  size_t heap_reservation_size = separate_non_moving_space
-      ? non_moving_space_capacity
-      : ((is_zygote && foreground_collector_type_ != kCollectorTypeCC) ? capacity_ : 0u);
+  size_t heap_reservation_size = 0u;
+  if (separate_non_moving_space) {
+    heap_reservation_size = non_moving_space_capacity;
+  } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
+             (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+    heap_reservation_size = capacity_;
+  }
   heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
   // Load image space(s).
   std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
   MemMap heap_reservation;
-  if (space::ImageSpace::LoadBootImage(image_file_name,
+  if (space::ImageSpace::LoadBootImage(boot_class_path,
+                                       boot_class_path_locations,
+                                       image_file_name,
                                        image_instruction_set,
                                        heap_reservation_size,
                                        &boot_image_spaces,
@@ -414,21 +440,22 @@
   // Attempt to create 2 mem maps at or after the requested begin.
   if (foreground_collector_type_ != kCollectorTypeCC) {
     ScopedTrace trace2("Create main mem map");
-    if (separate_non_moving_space || !is_zygote) {
+    if (separate_non_moving_space ||
+        !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
       main_mem_map_1 = MapAnonymousPreferredAddress(
           kMemMapSpaceName[0], request_begin, capacity_, &error_str);
     } else {
-      // If no separate non-moving space and we are the zygote, the main space must come right
-      // after the image space to avoid a gap. This is required since we want the zygote space to
-      // be adjacent to the image space.
+      // If no separate non-moving space and we are the zygote or the collector type is GSS,
+      // the main space must come right after the image space to avoid a gap.
+      // This is required since we want the zygote space to be adjacent to the image space.
       DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
       main_mem_map_1 = MemMap::MapAnonymous(
           kMemMapSpaceName[0],
           request_begin,
           capacity_,
           PROT_READ | PROT_WRITE,
-          /* low_4gb */ true,
-          /* reuse */ false,
+          /* low_4gb= */ true,
+          /* reuse= */ false,
           heap_reservation.IsValid() ? &heap_reservation : nullptr,
           &error_str);
     }
@@ -457,7 +484,7 @@
                                                                initial_size,
                                                                size,
                                                                size,
-                                                               /* can_move_objects */ false);
+                                                               /* can_move_objects= */ false);
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
         << non_moving_space_mem_map_begin;
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
@@ -470,7 +497,8 @@
     MemMap region_space_mem_map =
         space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
     CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
-    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
+    region_space_ = space::RegionSpace::Create(
+        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -499,11 +527,11 @@
       // Create bump pointer spaces instead of a backup space.
       main_mem_map_2.Reset();
       bump_pointer_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
       CHECK(bump_pointer_space_ != nullptr);
       AddSpace(bump_pointer_space_);
       temp_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
       CHECK(temp_space_ != nullptr);
       AddSpace(temp_space_);
     } else if (main_mem_map_2.IsValid()) {
@@ -513,7 +541,7 @@
                                                            growth_limit_,
                                                            capacity_,
                                                            name,
-                                                           /* can_move_objects */ true));
+                                                           /* can_move_objects= */ true));
       CHECK(main_space_backup_.get() != nullptr);
       // Add the space so its accounted for in the heap_begin and heap_end.
       AddSpace(main_space_backup_.get());
@@ -523,8 +551,7 @@
   CHECK(!non_moving_space_->CanMoveObjects());
   // Allocate the large object space.
   if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
-    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
-                                                       capacity_);
+    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
   } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
     large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
@@ -601,11 +628,11 @@
   task_processor_.reset(new TaskProcessor());
   reference_processor_.reset(new ReferenceProcessor());
   pending_task_lock_ = new Mutex("Pending task lock");
-  if (ignore_max_footprint_) {
+  if (ignore_target_footprint_) {
     SetIdealFootprint(std::numeric_limits<size_t>::max());
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
-  CHECK_NE(max_allowed_footprint_, 0U);
+  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
   // Create our garbage collectors.
   for (size_t i = 0; i < 2; ++i) {
     const bool concurrent = i != 0;
@@ -628,24 +655,29 @@
     }
     if (MayUseCollector(kCollectorTypeCC)) {
       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
-                                                                       /*young_gen*/false,
+                                                                       /*young_gen=*/false,
+                                                                       use_generational_cc_,
                                                                        "",
                                                                        measure_gc_performance);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
             this,
-            /*young_gen*/true,
+            /*young_gen=*/true,
+            use_generational_cc_,
             "young",
             measure_gc_performance);
       }
       active_concurrent_copying_collector_ = concurrent_copying_collector_;
       DCHECK(region_space_ != nullptr);
       concurrent_copying_collector_->SetRegionSpace(region_space_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_->SetRegionSpace(region_space_);
+        // At this point, non-moving space should be created.
+        DCHECK(non_moving_space_ != nullptr);
+        concurrent_copying_collector_->CreateInterRegionRefBitmaps();
       }
       garbage_collectors_.push_back(concurrent_copying_collector_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         garbage_collectors_.push_back(young_concurrent_copying_collector_);
       }
     }
@@ -665,7 +697,7 @@
     bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
     if (!no_gap) {
       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
-      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
+      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
     }
   }
@@ -690,7 +722,9 @@
                                       request_begin,
                                       capacity,
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb*/ true,
+                                      /*low_4gb=*/ true,
+                                      /*reuse=*/ false,
+                                      /*reservation=*/ nullptr,
                                       out_error_str);
     if (map.IsValid() || request_begin == nullptr) {
       return map;
@@ -1055,13 +1089,42 @@
   }
 }
 
+double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+                                               uint64_t current_process_cpu_time) const {
+  uint64_t bytes_allocated = GetBytesAllocated();
+  double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
+  return weight * bytes_allocated;
+}
+
+void Heap::CalculatePreGcWeightedAllocatedBytes() {
+  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+  pre_gc_weighted_allocated_bytes_ +=
+    CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+  pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
+}
+
+void Heap::CalculatePostGcWeightedAllocatedBytes() {
+  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+  post_gc_weighted_allocated_bytes_ +=
+    CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+  post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
+}
+
+uint64_t Heap::GetTotalGcCpuTime() {
+  uint64_t sum = 0;
+  for (auto* collector : garbage_collectors_) {
+    sum += collector->GetTotalCpuTime();
+  }
+  return sum;
+}
+
 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
   // Dump cumulative timings.
   os << "Dumping cumulative Gc timings\n";
   uint64_t total_duration = 0;
   // Dump cumulative loggers for each GC type.
   uint64_t total_paused_time = 0;
-  for (auto& collector : garbage_collectors_) {
+  for (auto* collector : garbage_collectors_) {
     total_duration += collector->GetCumulativeTimings().GetTotalNs();
     total_paused_time += collector->GetTotalPausedTimeNs();
     collector->DumpPerformanceInfo(os);
@@ -1112,18 +1175,28 @@
     rosalloc_space_->DumpStats(os);
   }
 
-  os << "Registered native bytes allocated: "
-     << (old_native_bytes_allocated_.load(std::memory_order_relaxed) +
-         new_native_bytes_allocated_.load(std::memory_order_relaxed))
-     << "\n";
+  os << "Native bytes total: " << GetNativeBytes()
+     << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
+
+  os << "Total native bytes at last GC: "
+     << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
 
   BaseMutex::DumpAll(os);
 }
 
 void Heap::ResetGcPerformanceInfo() {
-  for (auto& collector : garbage_collectors_) {
+  for (auto* collector : garbage_collectors_) {
     collector->ResetMeasurements();
   }
+
+  process_cpu_start_time_ns_ = ProcessCpuNanoTime();
+
+  pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+  pre_gc_weighted_allocated_bytes_ = 0u;
+
+  post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+  post_gc_weighted_allocated_bytes_ = 0u;
+
   total_bytes_freed_ever_ = 0;
   total_objects_freed_ever_ = 0;
   total_wait_time_ = 0;
@@ -1142,7 +1215,7 @@
 
 uint64_t Heap::GetGcCount() const {
   uint64_t gc_count = 0U;
-  for (auto& collector : garbage_collectors_) {
+  for (auto* collector : garbage_collectors_) {
     gc_count += collector->GetCumulativeTimings().GetIterations();
   }
   return gc_count;
@@ -1150,7 +1223,7 @@
 
 uint64_t Heap::GetGcTime() const {
   uint64_t gc_time = 0U;
-  for (auto& collector : garbage_collectors_) {
+  for (auto* collector : garbage_collectors_) {
     gc_time += collector->GetCumulativeTimings().GetTotalNs();
   }
   return gc_time;
@@ -1199,8 +1272,8 @@
   delete thread_flip_lock_;
   delete pending_task_lock_;
   delete backtrace_lock_;
-  uint64_t unique_count = unique_backtrace_count_.load(std::memory_order_relaxed);
-  uint64_t seen_count = seen_backtrace_count_.load(std::memory_order_relaxed);
+  uint64_t unique_count = unique_backtrace_count_.load();
+  uint64_t seen_count = seen_backtrace_count_.load();
   if (unique_count != 0 || seen_count != 0) {
     LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
   }
@@ -1264,6 +1337,10 @@
   return nullptr;
 }
 
+std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
+  space::Space* space = FindSpaceFromAddress(addr);
+  return (space != nullptr) ? space->GetName() : "no space";
+}
 
 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
   // If we're in a stack overflow, do not create a new exception. It would require running the
@@ -1278,7 +1355,8 @@
   size_t total_bytes_free = GetFreeMemory();
   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
-      << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
+      << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
+      << ", growth limit "
       << growth_limit_;
   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
   if (total_bytes_free >= byte_count) {
@@ -1317,7 +1395,7 @@
       // Invoke CC full compaction.
       CollectGarbageInternal(collector::kGcTypeFull,
                              kGcCauseCollectorTransition,
-                             /*clear_soft_references*/false);
+                             /*clear_soft_references=*/false);
     } else {
       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
     }
@@ -1344,6 +1422,11 @@
   TrimSpaces(self);
   // Trim arenas that may have been used by JIT or verifier.
   runtime->GetArenaPool()->TrimMaps();
+  {
+    // TODO: Move this to a callback called when startup is finished (b/120671223).
+    ScopedTrace trace2("Delete thread pool");
+    runtime->DeleteThreadPool();
+  }
 }
 
 class TrimIndirectReferenceTableClosure : public Closure {
@@ -1582,10 +1665,10 @@
   // Use signed comparison since freed bytes can be negative when background compaction foreground
  // transitions occur. This is caused by moving objects from a bump pointer space to a
   // free list backed space typically increasing memory footprint due to padding and binning.
-  DCHECK_LE(freed_bytes,
-            static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
+  RACING_DCHECK_LE(freed_bytes,
+                   static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
   // Note: This relies on 2s complement for handling negative freed_bytes.
-  num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes));
+  num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
   if (Runtime::Current()->HasStatsEnabled()) {
     RuntimeStats* thread_stats = Thread::Current()->GetStats();
     thread_stats->freed_objects += freed_objects;
@@ -1602,10 +1685,10 @@
   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
   // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
   // all the way to zero exactly as the remainder will be subtracted at the next GC.
-  size_t bytes_freed = num_bytes_freed_revoke_.load();
-  CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed),
+  size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
+  CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
            bytes_freed) << "num_bytes_freed_revoke_ underflow";
-  CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed),
+  CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
            bytes_freed) << "num_bytes_allocated_ underflow";
   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
 }
@@ -1777,7 +1860,7 @@
           break;
         }
         // Try to transition the heap if the allocation failure was due to the space being full.
-        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
+        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
           // If we aren't out of memory then the OOM was probably from the non moving space being
           // full. Attempt to disable compaction and turn the main space into a non moving space.
           DisableMovingGc();
@@ -1813,7 +1896,7 @@
 }
 
 void Heap::SetTargetHeapUtilization(float target) {
-  DCHECK_GT(target, 0.0f);  // asserted in Java code
+  DCHECK_GT(target, 0.1f);  // asserted in Java code
   DCHECK_LT(target, 1.0f);
   target_utilization_ = target;
 }
@@ -2030,7 +2113,7 @@
   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
              << " -> " << static_cast<int>(collector_type);
   uint64_t start_time = NanoTime();
-  uint32_t before_allocated = num_bytes_allocated_.load();
+  uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
   Runtime* const runtime = Runtime::Current();
   Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -2150,7 +2233,7 @@
       default: {
         LOG(FATAL) << "Attempted to transition to invalid collector type "
                    << static_cast<size_t>(collector_type);
-        break;
+        UNREACHABLE();
       }
     }
     ChangeCollector(collector_type);
@@ -2166,7 +2249,7 @@
     ScopedObjectAccess soa(self);
     soa.Vm()->UnloadNativeLibraries();
   }
-  int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
+  int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
   int32_t delta_allocated = before_allocated - after_allocated;
   std::string saved_str;
   if (delta_allocated >= 0) {
@@ -2185,7 +2268,7 @@
     gc_plan_.clear();
     switch (collector_type_) {
       case kCollectorTypeCC: {
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           gc_plan_.push_back(collector::kGcTypeSticky);
         }
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -2227,8 +2310,8 @@
     }
     if (IsGcConcurrent()) {
       concurrent_start_bytes_ =
-          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
-          kMinConcurrentRemainingBytes;
+          UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+                             kMinConcurrentRemainingBytes);
     } else {
       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
     }
@@ -2280,13 +2363,13 @@
     }
   }
 
-  virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
+  bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
     // allocator.
     return false;
   }
 
-  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
+  mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
@@ -2554,7 +2637,40 @@
 }
 
 void Heap::TraceHeapSize(size_t heap_size) {
-  ATRACE_INT("Heap size (KB)", heap_size / KB);
+  ATraceIntegerValue("Heap size (KB)", heap_size / KB);
+}
+
+size_t Heap::GetNativeBytes() {
+  size_t malloc_bytes;
+#if defined(__BIONIC__) || defined(__GLIBC__)
+  size_t mmapped_bytes;
+  struct mallinfo mi = mallinfo();
+  // In spite of the documentation, the jemalloc version of this call seems to do what we want,
+  // and it is thread-safe.
+  if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
+    // Shouldn't happen, but glibc declares uordblks as int.
+    // Avoiding sign extension gets us correct behavior for another 2 GB.
+    malloc_bytes = (unsigned int)mi.uordblks;
+    mmapped_bytes = (unsigned int)mi.hblkhd;
+  } else {
+    malloc_bytes = mi.uordblks;
+    mmapped_bytes = mi.hblkhd;
+  }
+  // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
+  // dramatically different. (b/119580449) If so, fudge it.
+  if (mmapped_bytes > malloc_bytes) {
+    malloc_bytes = mmapped_bytes;
+  }
+#else
+  // We should hit this case only in contexts in which GC triggering is not critical. Effectively
+  // disable GC triggering based on malloc().
+  malloc_bytes = 1000;
+#endif
+  return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
+  // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
+  // more expensive, and it would allow us to count memory allocated by means other than malloc.
+  // However it would change as pages are unmapped and remapped due to memory pressure, among
+  // other things. It seems risky to trigger GCs as a result of such changes.
 }
 
 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
@@ -2607,16 +2723,7 @@
     ++runtime->GetStats()->gc_for_alloc_count;
     ++self->GetStats()->gc_for_alloc_count;
   }
-  const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
-
-  if (gc_type == NonStickyGcType()) {
-    // Move all bytes from new_native_bytes_allocated_ to
-    // old_native_bytes_allocated_ now that GC has been triggered, resetting
-    // new_native_bytes_allocated_ to zero in the process.
-    old_native_bytes_allocated_.fetch_add(
-        new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed),
-        std::memory_order_relaxed);
-  }
+  const size_t bytes_allocated_before_gc = GetBytesAllocated();
 
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -2638,12 +2745,12 @@
         collector = semi_space_collector_;
         break;
       case kCollectorTypeCC:
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           // TODO: Other threads must do the flip checkpoint before they start poking at
           // active_concurrent_copying_collector_. So we should not see any concurrency here.
           active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
               young_concurrent_copying_collector_ : concurrent_copying_collector_;
-          active_concurrent_copying_collector_->SetRegionSpace(region_space_);
+          DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
         }
         collector = active_concurrent_copying_collector_;
         break;
@@ -2665,13 +2772,6 @@
   } else {
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
-  if (IsGcConcurrent()) {
-    // Disable concurrent GC check so that we don't have spammy JNI requests.
-    // This gets recalculated in GrowForUtilization. It is important that it is disabled /
-    // calculated in the same thread so that there aren't any races that can cause it to become
-    // permanantly disabled. b/17942071
-    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
-  }
 
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
@@ -2688,6 +2788,9 @@
   FinishGC(self, gc_type);
   // Inform DDMS that a GC completed.
   Dbg::GcDidFinish();
+
+  old_native_bytes_allocated_.store(GetNativeBytes());
+
   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
   // deadlocks in case the JNI_OnUnload function does allocations.
   {
@@ -2768,6 +2871,15 @@
   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
+
+  // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
+  // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
+  if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
+    LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
+                 << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
+    num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
+  }
+
   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
     // Record the first window.
     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
@@ -3427,7 +3539,7 @@
       running_collection_is_blocking_ = true;
       VLOG(gc) << "Waiting for a blocking GC " << cause;
     }
-    ScopedTrace trace("GC: Wait For Completion");
+    SCOPED_TRACE << "GC: Wait For Completion " << cause;
     // We must wait, change thread state then sleep on gc_complete_cond_;
     gc_complete_cond_->Wait(self);
     last_gc_type = last_gc_type_;
@@ -3462,16 +3574,17 @@
 }
 
 size_t Heap::GetPercentFree() {
-  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
+  return static_cast<size_t>(100.0f * static_cast<float>(
+      GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
 }
 
-void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
-  if (max_allowed_footprint > GetMaxMemory()) {
-    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::SetIdealFootprint(size_t target_footprint) {
+  if (target_footprint > GetMaxMemory()) {
+    VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
              << PrettySize(GetMaxMemory());
-    max_allowed_footprint = GetMaxMemory();
+    target_footprint = GetMaxMemory();
   }
-  max_allowed_footprint_ = max_allowed_footprint;
+  target_footprint_.store(target_footprint, std::memory_order_relaxed);
 }
 
 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
@@ -3486,7 +3599,7 @@
 }
 
 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
-  for (const auto& collector : garbage_collectors_) {
+  for (auto* collector : garbage_collectors_) {
     if (collector->GetCollectorType() == collector_type_ &&
         collector->GetGcType() == gc_type) {
       return collector;
@@ -3504,10 +3617,10 @@
 }
 
 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
-                              uint64_t bytes_allocated_before_gc) {
+                              size_t bytes_allocated_before_gc) {
   // We know what our utilization is at this moment.
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
-  const uint64_t bytes_allocated = GetBytesAllocated();
+  const size_t bytes_allocated = GetBytesAllocated();
   // Trace the new heap size after the GC is finished.
   TraceHeapSize(bytes_allocated);
   uint64_t target_size;
@@ -3515,48 +3628,55 @@
   // Use the multiplier to grow more for foreground.
   const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
   // foreground.
-  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
-  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
+  const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
+  const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
   if (gc_type != collector::kGcTypeSticky) {
     // Grow the heap for non sticky GC.
-    ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
-    CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated
-                       << " target_utilization_=" << target_utilization_;
+    uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
+    DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
+        << " target_utilization_=" << target_utilization_;
     target_size = bytes_allocated + delta * multiplier;
-    target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
-    target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
+    target_size = std::min(target_size,
+                           static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
+    target_size = std::max(target_size,
+                           static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
     collector::GcType non_sticky_gc_type = NonStickyGcType();
     // Find what the next non sticky collector will be.
     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       if (non_sticky_collector == nullptr) {
         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
       }
       CHECK(non_sticky_collector != nullptr);
     }
+    double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
+
     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
     // do another sticky collection next.
-    // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
+    // We also check that the bytes allocated aren't over the target_footprint, or
+    // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
     // if the sticky GC throughput always remained >= the full/partial throughput.
-    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
+    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+    if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
         non_sticky_collector->GetEstimatedMeanThroughput() &&
         non_sticky_collector->NumberOfIterations() > 0 &&
-        bytes_allocated <= max_allowed_footprint_) {
+        bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
       next_gc_type_ = collector::kGcTypeSticky;
     } else {
       next_gc_type_ = non_sticky_gc_type;
     }
     // If we have freed enough memory, shrink the heap back down.
-    if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
+    if (bytes_allocated + adjusted_max_free < target_footprint) {
       target_size = bytes_allocated + adjusted_max_free;
     } else {
-      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
+      target_size = std::max(bytes_allocated, target_footprint);
     }
   }
-  if (!ignore_max_footprint_) {
+  CHECK_LE(target_size, std::numeric_limits<size_t>::max());
+  if (!ignore_target_footprint_) {
     SetIdealFootprint(target_size);
     if (IsGcConcurrent()) {
       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
@@ -3565,26 +3685,25 @@
       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
       // how many bytes were allocated during the GC we need to add freed_bytes back on.
       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
-      const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
+      const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
           bytes_allocated_before_gc;
       // Calculate when to perform the next ConcurrentGC.
       // Estimate how many remaining bytes we will have when we need to start the next GC.
       size_t remaining_bytes = bytes_allocated_during_gc;
       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
-      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+      size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+      if (UNLIKELY(remaining_bytes > target_footprint)) {
         // A never going to happen situation that from the estimated allocation rate we will exceed
         // the applications entire footprint with the given estimated allocation rate. Schedule
         // another GC nearly straight away.
-        remaining_bytes = kMinConcurrentRemainingBytes;
+        remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
       }
-      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
-      DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
+      DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
       // right away.
-      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
-                                         static_cast<size_t>(bytes_allocated));
+      concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
     }
   }
 }
@@ -3612,11 +3731,11 @@
 }
 
 void Heap::ClearGrowthLimit() {
-  if (max_allowed_footprint_ == growth_limit_ && growth_limit_ < capacity_) {
-    max_allowed_footprint_ = capacity_;
+  if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
+      && growth_limit_ < capacity_) {
+    target_footprint_.store(capacity_, std::memory_order_relaxed);
     concurrent_start_bytes_ =
-         std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
-         kMinConcurrentRemainingBytes;
+        UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
   }
   growth_limit_ = capacity_;
   ScopedObjectAccess soa(Thread::Current());
@@ -3690,7 +3809,7 @@
   if (!Runtime::Current()->IsShuttingDown(self)) {
     // Wait for any GCs currently running to finish.
     if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
-      // If the we can't run the GC type we wanted to run, find the next appropriate one and try
+      // If we can't run the GC type we wanted to run, find the next appropriate one and try
       // that instead. E.g. can't do partial, so do full instead.
       collector::GcType next_gc_type = next_gc_type_;
       // If forcing full and next gc type is sticky, override with a non-sticky type.
@@ -3799,7 +3918,7 @@
 
 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
   size_t previous_num_bytes_freed_revoke =
-      num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst);
+      num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
   // Check the updated value is less than the number of bytes allocated. There is a risk of
   // execution being suspended between the increment above and the CHECK below, leading to
   // the use of previous_num_bytes_freed_revoke in the comparison.
@@ -3856,40 +3975,107 @@
                             static_cast<jlong>(timeout));
 }
 
-void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
-  size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
+// different fractions of Java allocations.
+// For now, we essentially do not count old native allocations at all, so that we can preserve the
+// existing behavior of not limiting native heap size. If we seriously considered it, we would
+// have to adjust collection thresholds when we encounter large amounts of old native memory,
+// and handle native out-of-memory situations.
 
-  if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
-             !IsGCRequestPending()) {
-    // Trigger another GC because there have been enough native bytes
-    // allocated since the last GC.
+static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
+static constexpr size_t kNewNativeDiscountFactor = 2;
+
+// If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
+// newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid
+// running out of memory.
+static constexpr float kStopForNativeFactor = 4.0;
+// TODO: Allow this to be tuned. We want this much smaller for some apps, like Calculator.
+// But making it too small can cause jank in apps like launcher that intentionally allocate
+// large amounts of memory in rapid succession. (b/122099093)
+// For now, we punt, and use a value that should be easily large enough to disable this in all
+// questionable settings, but that is clearly too large to be effective for small memory devices.
+static constexpr size_t kHugeNativeAllocs = 1 * GB;
+
+// Return the ratio of the weighted native + java allocated bytes to its target value.
+// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
+// behind.
+inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
+  // Collection check for native allocation. Does not enforce Java heap bounds.
+  // With adj_start_bytes defined below, effectively checks
+  // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
+  // where 0 < c1 < c2 <= 1, and currently c1 and c2 are 1 divided by the values defined above.
+  size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
+  if (old_native_bytes > current_native_bytes) {
+    // Net decrease; skip the check, but update old value.
+    // It's OK to lose an update if two stores race.
+    old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
+    return 0.0;
+  } else {
+    size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
+    size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
+        + old_native_bytes / kOldNativeDiscountFactor;
+    size_t add_bytes_allowed = static_cast<size_t>(
+        NativeAllocationGcWatermark() * HeapGrowthMultiplier());
+    size_t adj_start_bytes = concurrent_start_bytes_ + add_bytes_allowed / kNewNativeDiscountFactor;
+    return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
+         / static_cast<float>(adj_start_bytes);
+  }
+}
+
+inline void Heap::CheckConcurrentGCForNative(Thread* self) {
+  size_t current_native_bytes = GetNativeBytes();
+  float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
+  if (UNLIKELY(gc_urgency >= 1.0)) {
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+      RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
+      if (gc_urgency > kStopForNativeFactor
+          && current_native_bytes > kHugeNativeAllocs) {
+        // We're in danger of running out of memory due to rampant native allocation.
+        if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+          LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
+        }
+        WaitForGcToComplete(kGcCauseForNativeAlloc, self);
+      }
     } else {
       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
     }
   }
 }
 
-void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
-  // Take the bytes freed out of new_native_bytes_allocated_ first. If
-  // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
-  // out of old_native_bytes_allocated_ to ensure all freed bytes are
-  // accounted for.
-  size_t allocated;
-  size_t new_freed_bytes;
-  do {
-    allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed);
-    new_freed_bytes = std::min(allocated, bytes);
-  } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
-                                                                   allocated - new_freed_bytes));
-  if (new_freed_bytes < bytes) {
-    old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed);
+// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
+void Heap::NotifyNativeAllocations(JNIEnv* env) {
+  native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
+  CheckConcurrentGCForNative(ThreadForEnv(env));
+}
+
+// Register a native allocation with an explicit size.
+// This should only be done for large allocations of non-malloc memory, which we wouldn't
+// otherwise see.
+void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
+  native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
+  uint32_t objects_notified =
+      native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
+  if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
+      || bytes > kCheckImmediatelyThreshold) {
+    CheckConcurrentGCForNative(ThreadForEnv(env));
   }
 }
 
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+  size_t allocated;
+  size_t new_freed_bytes;
+  do {
+    allocated = native_bytes_registered_.load(std::memory_order_relaxed);
+    new_freed_bytes = std::min(allocated, bytes);
+    // We should not be registering more free than allocated bytes.
+    // But correctly keep going in non-debug builds.
+    DCHECK_EQ(new_freed_bytes, bytes);
+  } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
+                                                              allocated - new_freed_bytes));
+}
+
 size_t Heap::GetTotalMemory() const {
-  return std::max(max_allowed_footprint_, GetBytesAllocated());
+  return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
 }
 
 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
@@ -3910,7 +4096,7 @@
       << " IsVariableSize=" << c->IsVariableSize()
       << " ObjectSize=" << c->GetObjectSize()
       << " sizeof(Class)=" << sizeof(mirror::Class)
-      << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+      << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
   CHECK_GE(byte_count, sizeof(mirror::Object));
 }
 
@@ -4006,7 +4192,7 @@
     {
       static constexpr size_t kMaxFrames = 16u;
       FixedSizeBacktrace<kMaxFrames> backtrace;
-      backtrace.Collect(/* skip_frames */ 2);
+      backtrace.Collect(/* skip_count= */ 2);
       uint64_t hash = backtrace.Hash();
       MutexLock mu(self, *backtrace_lock_);
       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
@@ -4017,10 +4203,10 @@
     if (new_backtrace) {
       StackHandleScope<1> hs(self);
       auto h = hs.NewHandleWrapper(obj);
-      CollectGarbage(/* clear_soft_references */ false);
-      unique_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
+      CollectGarbage(/* clear_soft_references= */ false);
+      unique_backtrace_count_.fetch_add(1);
     } else {
-      seen_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
+      seen_backtrace_count_.fetch_add(1);
     }
   }
 }
@@ -4191,8 +4377,8 @@
   return verification_.get();
 }
 
-void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) {
-  VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
+  VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
              << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
 }
 
@@ -4201,22 +4387,23 @@
   explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
   void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    // Trigger a GC, if not already done. The first GC after fork, whenever
+    // Trigger a GC, if not already done. The first GC after fork, whenever it
     // takes place, will adjust the thresholds to normal levels.
-    if (heap->max_allowed_footprint_ == heap->growth_limit_) {
+    if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
       heap->RequestConcurrentGC(self, kGcCauseBackground, false);
     }
   }
 };
 
 void Heap::PostForkChildAction(Thread* self) {
-  // Temporarily increase max_allowed_footprint_ and concurrent_start_bytes_ to
+  // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
   // max values to avoid GC during app launch.
   if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
-    // Set max_allowed_footprint_ to the largest allowed value.
+    // Set target_footprint_ to the largest allowed value.
     SetIdealFootprint(growth_limit_);
     // Set concurrent_start_bytes_ to half of the heap size.
-    concurrent_start_bytes_ = std::max(max_allowed_footprint_ / 2, GetBytesAllocated());
+    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+    concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
 
     GetTaskProcessor()->AddTask(
         self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 90bac20..6bdba12 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,9 +25,7 @@
 #include <android-base/logging.h>
 
 #include "allocator_type.h"
-#include "arch/instruction_set.h"
 #include "base/atomic.h"
-#include "base/globals.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/runtime_debug.h"
@@ -43,11 +41,13 @@
 #include "offsets.h"
 #include "process_state.h"
 #include "read_barrier_config.h"
+#include "runtime_globals.h"
 #include "verify_object.h"
 
 namespace art {
 
 class ConditionVariable;
+enum class InstructionSet;
 class IsMarkedVisitor;
 class Mutex;
 class RootVisitor;
@@ -126,7 +126,6 @@
 
 class Heap {
  public:
-  // If true, measure the total allocation time.
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -155,6 +154,21 @@
   // Used so that we don't overflow the allocation time atomic integer.
   static constexpr size_t kTimeAdjust = 1024;
 
+  // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
+  // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
+  // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
+#ifdef __ANDROID__
+  static constexpr uint32_t kNotifyNativeInterval = 32;
+#else
+  // Some host mallinfo() implementations are slow. And memory is less scarce.
+  static constexpr uint32_t kNotifyNativeInterval = 128;
+#endif
+
+  // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
+  // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
+  // make it safe to allocate that many bytes between checks.
+  static constexpr size_t kCheckImmediatelyThreshold = 300000;
+
   // How often we allow heap trimming to happen (nanoseconds).
   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
   // How long we wait after a transition request to perform a collector transition (nanoseconds).
@@ -174,7 +188,9 @@
        double foreground_heap_growth_multiplier,
        size_t capacity,
        size_t non_moving_space_capacity,
-       const std::string& original_image_file_name,
+       const std::vector<std::string>& boot_class_path,
+       const std::vector<std::string>& boot_class_path_locations,
+       const std::string& image_file_name,
        InstructionSet image_instruction_set,
        CollectorType foreground_collector_type,
        CollectorType background_collector_type,
@@ -185,7 +201,7 @@
        bool low_memory_mode,
        size_t long_pause_threshold,
        size_t long_gc_threshold,
-       bool ignore_max_footprint,
+       bool ignore_target_footprint,
        bool use_tlab,
        bool verify_pre_gc_heap,
        bool verify_pre_sweeping_heap,
@@ -196,7 +212,10 @@
        bool gc_stress_mode,
        bool measure_gc_performance,
        bool use_homogeneous_space_compaction,
-       uint64_t min_interval_homogeneous_space_compaction_by_oom);
+       bool use_generational_cc,
+       uint64_t min_interval_homogeneous_space_compaction_by_oom,
+       bool dump_region_info_before_gc,
+       bool dump_region_info_after_gc);
 
   ~Heap();
 
@@ -267,10 +286,22 @@
   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Inform the garbage collector of a non-malloc allocated native memory that might become
+  // reclaimable in the future as a result of Java garbage collection.
   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
   void RegisterNativeFree(JNIEnv* env, size_t bytes);
 
+  // Notify the garbage collector of malloc allocations that might be reclaimable
+  // as a result of Java garbage collection. Each such call represents approximately
+  // kNotifyNativeInterval such allocations.
+  void NotifyNativeAllocations(JNIEnv* env)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+
+  uint32_t GetNotifyNativeInterval() {
+    return kNotifyNativeInterval;
+  }
+
   // Change the allocator, updates entrypoints.
   void ChangeAllocator(AllocatorType allocator)
       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
@@ -395,6 +426,26 @@
     REQUIRES(!Locks::heap_bitmap_lock_)
     REQUIRES(Locks::mutator_lock_);
 
+  double GetPreGcWeightedAllocatedBytes() const {
+    return pre_gc_weighted_allocated_bytes_;
+  }
+
+  double GetPostGcWeightedAllocatedBytes() const {
+    return post_gc_weighted_allocated_bytes_;
+  }
+
+  void CalculatePreGcWeightedAllocatedBytes();
+  void CalculatePostGcWeightedAllocatedBytes();
+  uint64_t GetTotalGcCpuTime();
+
+  uint64_t GetProcessCpuStartTime() const {
+    return process_cpu_start_time_ns_;
+  }
+
+  uint64_t GetPostGCLastProcessCpuTime() const {
+    return post_gc_last_process_cpu_time_ns_;
+  }
+
   // Set target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.setTargetHeapUtilization.
   void SetTargetHeapUtilization(float target);
@@ -477,8 +528,13 @@
   void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
 
   // Returns the number of bytes currently allocated.
+  // The result should be treated as an approximation, if it is being concurrently updated.
   size_t GetBytesAllocated() const {
-    return num_bytes_allocated_.load(std::memory_order_seq_cst);
+    return num_bytes_allocated_.load(std::memory_order_relaxed);
+  }
+
+  bool GetUseGenerationalCC() const {
+    return use_generational_cc_;
   }
 
   // Returns the number of objects currently allocated.
@@ -501,12 +557,16 @@
     return total_bytes_freed_ever_;
   }
 
+  space::RegionSpace* GetRegionSpace() const {
+    return region_space_;
+  }
+
   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
   // were specified. Android apps start with a growth limit (small heap size) which is
   // cleared/extended for large apps.
   size_t GetMaxMemory() const {
-    // There is some race conditions in the allocation code that can cause bytes allocated to
+    // There are some race conditions in the allocation code that can cause bytes allocated to
     // become larger than growth_limit_ in rare cases.
     return std::max(GetBytesAllocated(), growth_limit_);
   }
@@ -517,21 +577,20 @@
 
   // Returns approximately how much free memory we have until the next GC happens.
   size_t GetFreeMemoryUntilGC() const {
-    return max_allowed_footprint_ - GetBytesAllocated();
+    return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+                              GetBytesAllocated());
   }
 
   // Returns approximately how much free memory we have until the next OOME happens.
   size_t GetFreeMemoryUntilOOME() const {
-    return growth_limit_ - GetBytesAllocated();
+    return UnsignedDifference(growth_limit_, GetBytesAllocated());
   }
 
   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
   size_t GetFreeMemory() const {
-    size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
-    size_t total_memory = GetTotalMemory();
-    // Make sure we don't get a negative number.
-    return total_memory - std::min(total_memory, byte_allocated);
+    return UnsignedDifference(GetTotalMemory(),
+                              num_bytes_allocated_.load(std::memory_order_relaxed));
   }
 
   // Get the space that corresponds to an object's address. Current implementation searches all
@@ -553,6 +612,9 @@
   space::Space* FindSpaceFromAddress(const void* ptr) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  std::string DumpSpaceNameFromAddress(const void* addr) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
 
   // Do a pending collector transition.
@@ -711,7 +773,7 @@
 
   // Returns the active concurrent copying collector.
   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
              (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
     } else {
@@ -841,6 +903,9 @@
       REQUIRES(!*gc_complete_lock_);
   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
 
+  double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+                                           uint64_t current_process_cpu_time) const;
+
   // Create a mem map with a preferred base address.
   static MemMap MapAnonymousPreferredAddress(const char* name,
                                              uint8_t* request_begin,
@@ -852,12 +917,16 @@
     return main_space_backup_ != nullptr;
   }
 
+  static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
+    return x > y ? x - y : 0;
+  }
+
   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
     return
+        allocator_type != kAllocatorTypeRegionTLAB &&
         allocator_type != kAllocatorTypeBumpPointer &&
         allocator_type != kAllocatorTypeTLAB &&
-        allocator_type != kAllocatorTypeRegion &&
-        allocator_type != kAllocatorTypeRegionTLAB;
+        allocator_type != kAllocatorTypeRegion;
   }
   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
     if (kUseReadBarrier) {
@@ -865,24 +934,30 @@
       return true;
     }
     return
-        allocator_type != kAllocatorTypeBumpPointer &&
-        allocator_type != kAllocatorTypeTLAB;
+        allocator_type != kAllocatorTypeTLAB &&
+        allocator_type != kAllocatorTypeBumpPointer;
   }
   static bool IsMovingGc(CollectorType collector_type) {
     return
+        collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeSS ||
         collector_type == kCollectorTypeGSS ||
-        collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeCCBackground ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
-                                       size_t new_num_bytes_allocated,
-                                       ObjPtr<mirror::Object>* obj)
+
+  // Checks whether we should garbage collect:
+  ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
+  float NativeMemoryOverTarget(size_t current_native_bytes);
+  ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
+                                              size_t new_num_bytes_allocated,
+                                              ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+  void CheckConcurrentGCForNative(Thread* self)
+      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
   accounting::ObjectStack* GetMarkStack() {
     return mark_stack_.get();
@@ -943,6 +1018,11 @@
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Are we out of memory, and thus should force a GC or fail?
+  // For concurrent collectors, out of memory is defined by growth_limit_.
+  // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
+  // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
+  // to accommodate the allocation.
   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                                size_t alloc_size,
                                                bool grow);
@@ -1006,7 +1086,7 @@
   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
   // the GC was run.
   void GrowForUtilization(collector::GarbageCollector* collector_ran,
-                          uint64_t bytes_allocated_before_gc = 0);
+                          size_t bytes_allocated_before_gc = 0);
 
   size_t GetPercentFree();
 
@@ -1040,8 +1120,8 @@
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
   bool IsGcConcurrent() const ALWAYS_INLINE {
-    return collector_type_ == kCollectorTypeCMS ||
-        collector_type_ == kCollectorTypeCC ||
+    return collector_type_ == kCollectorTypeCC ||
+        collector_type_ == kCollectorTypeCMS ||
         collector_type_ == kCollectorTypeCCBackground;
   }
 
@@ -1070,15 +1150,13 @@
     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
   }
 
-  // How large new_native_bytes_allocated_ can grow before we trigger a new
-  // GC.
+  // Return the amount of space we allow for native memory when deciding whether to
+  // collect. We collect when a weighted sum of Java memory plus native memory exceeds
+  // the similarly weighted sum of the Java heap size target and this value.
   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
-    // Reuse max_free_ for the native allocation gc watermark, so that the
-    // native heap is treated in the same way as the Java heap in the case
-    // where the gc watermark update would exceed max_free_. Using max_free_
-    // instead of the target utilization means the watermark doesn't depend on
-    // the current number of registered native allocations.
-    return max_free_;
+    // We keep the traditional limit of max_free_ in place for small heaps,
+    // but allow it to be adjusted upward for large heaps to limit GC overhead.
+    return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
   }
 
   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
@@ -1088,6 +1166,11 @@
   // Remove a vlog code from heap-inl.h which is transitively included in half the world.
   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
 
+  // Return our best approximation of the number of bytes of native memory that
+  // are currently in use, and could possibly be reclaimed as an indirect result
+  // of a garbage collection.
+  size_t GetNativeBytes();
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
 
@@ -1155,9 +1238,21 @@
   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
   const size_t long_gc_log_threshold_;
 
-  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
-  // useful for benchmarking since it reduces time spent in GC to a low %.
-  const bool ignore_max_footprint_;
+  // Starting time of the new process; meant to be used for measuring total process CPU time.
+  uint64_t process_cpu_start_time_ns_;
+
+  // Last time (before and after) GC started; meant to be used to measure the
+  // duration between two GCs.
+  uint64_t pre_gc_last_process_cpu_time_ns_;
+  uint64_t post_gc_last_process_cpu_time_ns_;
+
+  // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
+  double pre_gc_weighted_allocated_bytes_;
+  double post_gc_weighted_allocated_bytes_;
+
+  // If we ignore the target footprint it lets the heap grow until it hits the heap capacity, this
+  // is useful for benchmarking since it reduces time spent in GC to a low %.
+  const bool ignore_target_footprint_;
 
   // Lock which guards zygote space creation.
   Mutex zygote_creation_lock_;
@@ -1206,14 +1301,18 @@
 
   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
   // programs it is "cleared" making it the same as capacity.
+  // Only weakly enforced for simultaneous allocations.
   size_t growth_limit_;
 
-  // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
-  // a GC should be triggered.
-  size_t max_allowed_footprint_;
+  // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
+  // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
+  // concurrent GC case.
+  Atomic<size_t> target_footprint_;
 
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
+  // A multiple of this is also used to determine when to trigger a GC in response to native
+  // allocation.
   size_t concurrent_start_bytes_;
 
   // Since the heap was created, how many bytes have been freed.
@@ -1222,22 +1321,22 @@
   // Since the heap was created, how many objects have been freed.
   uint64_t total_objects_freed_ever_;
 
-  // Number of bytes allocated.  Adjusted after each allocation and free.
+  // Number of bytes currently allocated and not yet reclaimed. Includes active
+  // TLABS in their entirety, even if they have not yet been parceled out.
   Atomic<size_t> num_bytes_allocated_;
 
-  // Number of registered native bytes allocated since the last time GC was
-  // triggered. Adjusted after each RegisterNativeAllocation and
-  // RegisterNativeFree. Used to determine when to trigger GC for native
-  // allocations.
-  // See the REDESIGN section of go/understanding-register-native-allocation.
-  Atomic<size_t> new_native_bytes_allocated_;
+  // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
+  // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
+  // not include bytes allocated through the system malloc, since those are implicitly included.
+  Atomic<size_t> native_bytes_registered_;
 
-  // Number of registered native bytes allocated prior to the last time GC was
-  // triggered, for debugging purposes. The current number of registered
-  // native bytes is determined by taking the sum of
-  // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+  // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
   Atomic<size_t> old_native_bytes_allocated_;
 
+  // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
+  // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
+  Atomic<uint32_t> native_objects_notified_;
+
   // Number of bytes freed by thread local buffer revokes. This will
   // cancel out the ahead-of-time bulk counting of bytes allocated in
   // rosalloc thread-local buffers.  It is temporarily accumulated
@@ -1322,10 +1421,10 @@
 
   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
   // utilization, regardless of target utilization ratio.
-  size_t min_free_;
+  const size_t min_free_;
 
   // The ideal maximum free size, when we grow the heap for utilization.
-  size_t max_free_;
+  const size_t max_free_;
 
   // Target ideal heap utilization ratio.
   double target_utilization_;
@@ -1383,6 +1482,11 @@
   // Whether or not we use homogeneous space compaction to avoid OOM errors.
   bool use_homogeneous_space_compaction_for_oom_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Set in Heap constructor.
+  const bool use_generational_cc_;
+
   // True if the currently running collection has made some thread wait.
   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
   // The number of blocking GC runs.
@@ -1391,6 +1495,8 @@
   uint64_t blocking_gc_time_;
   // The duration of the window for the GC count rate histograms.
   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
+  // Maximum number of missed histogram windows for which statistics will be collected.
+  static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
   // The last time when the GC count rate histograms were updated.
   // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
   uint64_t last_update_time_gc_count_rate_histograms_;
@@ -1421,6 +1527,11 @@
   // allocating.
   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
 
+  // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
+  // emit region info before and after each GC cycle.
+  bool dump_region_info_before_gc_;
+  bool dump_region_info_after_gc_;
+
   // Boot image spaces.
   std::vector<space::ImageSpace*> boot_image_spaces_;
 
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 7cbad3b..fa10150 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -21,6 +21,7 @@
 #include "handle_scope-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
@@ -37,7 +38,9 @@
                                      gc::Heap::kPreferredAllocSpaceBegin,
                                      16 * KB,
                                      PROT_READ,
-                                     /*low_4gb*/ true,
+                                     /*low_4gb=*/ true,
+                                     /*reuse=*/ false,
+                                     /*reservation=*/ nullptr,
                                      &error_msg);
     ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
@@ -77,7 +80,7 @@
       }
     }
   }
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 }
 
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
@@ -91,12 +94,12 @@
 }
 
 TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
 }
 
 class ZygoteHeapTest : public CommonRuntimeTest {
-  void SetUpRuntimeOptions(RuntimeOptions* options) {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonRuntimeTest::SetUpRuntimeOptions(options);
     options->push_back(std::make_pair("-Xzygote", nullptr));
   }
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index 6caca84..7835c29 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -21,6 +21,7 @@
 #include "class_root.h"
 #include "handle_scope-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string.h"
 #include "runtime.h"
@@ -83,7 +84,12 @@
 }
 
 TEST_F(VerificationTest, IsValidClassInHeap) {
-  TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+  // Now that the String class is allocated in the non-moving space when the
+  // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the (presumably
+  // invalid) memory location `uint_klass - kObjectAlignment` tested below is
+  // poisoned when running with AddressSanitizer. Disable this test in that case.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
   ScopedObjectAccess soa(Thread::Current());
   VariableSizedHandleScope hs(soa.Self());
   Handle<mirror::String> string(
@@ -106,7 +112,13 @@
 }
 
 TEST_F(VerificationTest, DumpValidObjectInfo) {
-  TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+  // Now that the String class is allocated in the non-moving space when the
+  // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the calls to
+  // Verification::DumpObjectInfo below involving the String class object
+  // (`string->GetClass()`, `uint_klass`, etc.) access poisoned memory when they
+  // call Verification::DumpRAMAroundAddress. Disable this test in that case.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
   ScopedLogSeverity sls(LogSeverity::INFO);
   ScopedObjectAccess soa(Thread::Current());
   Runtime* const runtime = Runtime::Current();
@@ -126,7 +138,13 @@
 }
 
 TEST_F(VerificationTest, LogHeapCorruption) {
-  TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+  // Now that the String class is allocated in the non-moving space when the
+  // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the call to
+  // Verification::LogHeapCorruption below involving the String class object
+  // (`string->GetClass()`) accesses poisoned memory when it calls
+  // Verification::DumpRAMAroundAddress. Disable this test in that case.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
   ScopedLogSeverity sls(LogSeverity::INFO);
   ScopedObjectAccess soa(Thread::Current());
   Runtime* const runtime = Runtime::Current();
diff --git a/runtime/gc/racing_check.h b/runtime/gc/racing_check.h
new file mode 100644
index 0000000..a81a513
--- /dev/null
+++ b/runtime/gc/racing_check.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_RACING_CHECK_H_
+#define ART_RUNTIME_GC_RACING_CHECK_H_
+
+#include <unistd.h>
+#include <android-base/logging.h>
+
+// For checking purposes, we occasionally compare global counter values.
+// These counters are generally updated without ordering constraints, and hence
+// we may actually see inconsistent values when checking. To minimize spurious
+// failures, try twice with an intervening short sleep. This is a hack not used
+// in production builds.
+#define RACING_DCHECK_LE(x, y) \
+  if (::android::base::kEnableDChecks && ((x) > (y))) { usleep(1000); CHECK_LE(x, y); }
+
+#endif  // ART_RUNTIME_GC_RACING_CHECK_H_
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index fe4124d..4944639 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,6 +17,7 @@
 #include "reference_processor.h"
 
 #include "art_field-inl.h"
+#include "base/mutex.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
 #include "class_root.h"
@@ -60,16 +61,16 @@
 static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
   MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
-  reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>(
+  reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
       slow_path_offset, enabled ? 1 : 0);
 }
 
 void ReferenceProcessor::EnableSlowPath() {
-  SetSlowPathFlag(/* enabled */ true);
+  SetSlowPathFlag(/* enabled= */ true);
 }
 
 void ReferenceProcessor::DisableSlowPath(Thread* self) {
-  SetSlowPathFlag(/* enabled */ false);
+  SetSlowPathFlag(/* enabled= */ false);
   condition_.Broadcast(self);
 }
 
@@ -238,13 +239,13 @@
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
   // do_atomic_update needs to be true because this happens outside of the reference processing
   // phase.
-  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
     if (UNLIKELY(collector->IsTransactionActive())) {
       // In transaction mode, keep the referent alive and avoid any reference processing to avoid the
       // issue of rolling back reference processing.  do_atomic_update needs to be true because this
       // happens outside of the reference processing phase.
       if (!referent->IsNull()) {
-        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+        collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
       }
       return;
     }
@@ -276,7 +277,7 @@
   explicit ClearedReferenceTask(jobject cleared_references)
       : HeapTask(NanoTime()), cleared_references_(cleared_references) {
   }
-  virtual void Run(Thread* thread) {
+  void Run(Thread* thread) override {
     ScopedObjectAccess soa(thread);
     jvalue args[1];
     args[0].l = cleared_references_;
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index c6c7836..c1c9a3c 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -17,10 +17,10 @@
 #ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 #define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 
-#include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "reference_queue.h"
+#include "runtime_globals.h"
 
 namespace art {
 
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index e25e279..95871da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -17,6 +17,7 @@
 #include "reference_queue.h"
 
 #include "accounting/card_table-inl.h"
+#include "base/mutex.h"
 #include "collector/concurrent_copying.h"
 #include "heap.h"
 #include "mirror/class-inl.h"
@@ -136,7 +137,7 @@
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     // do_atomic_update is false because this happens during the reference processing phase where
     // Reference.clear() would block.
-    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
       // Referent is white, clear it.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->ClearReferent<true>();
@@ -158,7 +159,7 @@
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     // do_atomic_update is false because this happens during the reference processing phase where
     // Reference.clear() would block.
-    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
       ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
@@ -187,7 +188,7 @@
     if (referent_addr->AsMirrorPtr() != nullptr) {
       // do_atomic_update is false because mutators can't access the referent due to the weak ref
       // access blocking.
-      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
+      visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
     }
     ref = ref->GetPendingNext();
   } while (LIKELY(ref != head));
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 09ab51a..90f0be7 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -22,15 +22,18 @@
 #include <vector>
 
 #include "base/atomic.h"
-#include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/timing_logger.h"
 #include "jni.h"
 #include "obj_ptr.h"
 #include "offsets.h"
+#include "runtime_globals.h"
 #include "thread_pool.h"
 
 namespace art {
+
+class Mutex;
+
 namespace mirror {
 class Reference;
 }  // namespace mirror
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index ce0807c..c680fb5 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -18,6 +18,7 @@
 
 #include "common_runtime_test.h"
 #include "handle_scope-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "reference_queue.h"
 #include "scoped_thread_state_change-inl.h"
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 864bf87..8ad0158 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 #define ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "collector_type.h"
 #include "gc_cause.h"
 
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 4c58549..20f7a93 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -83,8 +83,8 @@
 inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
   mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
   if (ret != nullptr) {
-    objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
-    bytes_allocated_.fetch_add(num_bytes, std::memory_order_seq_cst);
+    objects_allocated_.fetch_add(1, std::memory_order_relaxed);
+    bytes_allocated_.fetch_add(num_bytes, std::memory_order_relaxed);
   }
   return ret;
 }
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 42453f5..609ccee 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -24,15 +24,13 @@
 namespace gc {
 namespace space {
 
-BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
-                                           uint8_t* requested_begin) {
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
@@ -206,8 +204,8 @@
 }
 
 void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
-  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_seq_cst);
-  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_seq_cst);
+  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
+  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
   thread->SetTlab(nullptr, nullptr, nullptr);
 }
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 02e84b5..6d9fd04 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -19,6 +19,8 @@
 
 #include "space.h"
 
+#include "base/mutex.h"
+
 namespace art {
 
 namespace mirror {
@@ -46,7 +48,7 @@
   // Create a bump pointer space with the requested sizes. The requested base address is not
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  static BumpPointerSpace* Create(const std::string& name, size_t capacity);
   static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
 
   // Allocate num_bytes, returns null if the space is full.
@@ -155,8 +157,8 @@
 
   // Record objects / bytes freed.
   void RecordFree(int32_t objects, int32_t bytes) {
-    objects_allocated_.fetch_sub(objects, std::memory_order_seq_cst);
-    bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
+    objects_allocated_.fetch_sub(objects, std::memory_order_relaxed);
+    bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 36d2161..7955ff9 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -54,7 +54,7 @@
                   end,
                   limit,
                   growth_limit,
-                  /* create_bitmaps */ true,
+                  /* create_bitmaps= */ true,
                   can_move_objects,
                   starting_size, initial_size),
       mspace_(mspace) {
@@ -108,8 +108,10 @@
   }
 }
 
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
+DlMallocSpace* DlMallocSpace::Create(const std::string& name,
+                                     size_t initial_size,
+                                     size_t growth_limit,
+                                     size_t capacity,
                                      bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -117,8 +119,7 @@
     LOG(INFO) << "DlMallocSpace::Create entering " << name
         << " initial_size=" << PrettySize(initial_size)
         << " growth_limit=" << PrettySize(growth_limit)
-        << " capacity=" << PrettySize(capacity)
-        << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+        << " capacity=" << PrettySize(capacity);
   }
 
   // Memory we promise to dlmalloc before it asks for morecore.
@@ -126,8 +127,7 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = kPageSize;
-  MemMap mem_map =
-      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c63ff71..e91602f 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -46,8 +46,11 @@
   // base address is not guaranteed to be granted, if it is required,
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
-  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);
+  static DlMallocSpace* Create(const std::string& name,
+                               size_t initial_size,
+                               size_t growth_limit,
+                               size_t capacity,
+                               bool can_move_objects);
 
   // Virtual to allow MemoryToolMallocSpace to intercept.
   mirror::Object* AllocWithGrowth(Thread* self,
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index f9b41da..92b56bd 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -22,14 +22,16 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return DlMallocSpace::Create(
+      name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 5758e0c..550d1bb 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -22,14 +22,16 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return DlMallocSpace::Create(
+      name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 3999e27..4a2dbf5 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -16,7 +16,6 @@
 
 #include "image_space.h"
 
-#include <lz4.h>
 #include <sys/statvfs.h>
 #include <sys/types.h>
 #include <unistd.h>
@@ -26,8 +25,11 @@
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
+#include "arch/instruction_set.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/array_ref.h"
+#include "base/bit_memory_region.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/file_utils.h"
@@ -38,13 +40,17 @@
 #include "base/systrace.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
+#include "class_root.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/dex_file_loader.h"
 #include "exec_utils.h"
 #include "gc/accounting/space_bitmap-inl.h"
+#include "gc/task_processor.h"
 #include "image-inl.h"
 #include "image_space_fs.h"
+#include "intern_table-inl.h"
 #include "mirror/class-inl.h"
+#include "mirror/executable.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 #include "oat_file.h"
@@ -101,9 +107,8 @@
 static bool GenerateImage(const std::string& image_filename,
                           InstructionSet image_isa,
                           std::string* error_msg) {
-  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
-  std::vector<std::string> boot_class_path;
-  Split(boot_class_path_string, ':', &boot_class_path);
+  Runtime* runtime = Runtime::Current();
+  const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
   if (boot_class_path.empty()) {
     *error_msg = "Failed to generate image because no boot class path specified";
     return false;
@@ -123,8 +128,11 @@
   image_option_string += image_filename;
   arg_vector.push_back(image_option_string);
 
-  for (size_t i = 0; i < boot_class_path.size(); i++) {
+  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+  DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
+  for (size_t i = 0u; i < boot_class_path.size(); i++) {
     arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
+    arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
   }
 
   std::string oat_file_option_string("--oat-file=");
@@ -181,7 +189,7 @@
   bool have_android_data = false;
   *dalvik_cache_exists = false;
   GetDalvikCache(GetInstructionSetString(image_isa),
-                 /* create_if_absent */ true,
+                 /*create_if_absent=*/ true,
                  dalvik_cache,
                  &have_android_data,
                  dalvik_cache_exists,
@@ -239,142 +247,37 @@
     return true;
 }
 
-// Relocate the image at image_location to dest_filename and relocate it by a random amount.
-static bool RelocateImage(const char* image_location,
-                          const char* dest_directory,
-                          InstructionSet isa,
-                          std::string* error_msg) {
-  // We should clean up so we are more likely to have room for the image.
-  if (Runtime::Current()->IsZygote()) {
-    LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
-    PruneDalvikCache(isa);
-  }
-
-  std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
-
-  std::string input_image_location_arg("--input-image-location=");
-  input_image_location_arg += image_location;
-
-  std::string output_image_directory_arg("--output-image-directory=");
-  output_image_directory_arg += dest_directory;
-
-  std::string instruction_set_arg("--instruction-set=");
-  instruction_set_arg += GetInstructionSetString(isa);
-
-  std::string base_offset_arg("--base-offset-delta=");
-  StringAppendF(&base_offset_arg, "%d", ChooseRelocationOffsetDelta());
-
-  std::vector<std::string> argv;
-  argv.push_back(patchoat);
-
-  argv.push_back(input_image_location_arg);
-  argv.push_back(output_image_directory_arg);
-
-  argv.push_back(instruction_set_arg);
-  argv.push_back(base_offset_arg);
-
-  std::string command_line(android::base::Join(argv, ' '));
-  LOG(INFO) << "RelocateImage: " << command_line;
-  return Exec(argv, error_msg);
-}
-
-static bool VerifyImage(const char* image_location,
-                        const char* dest_directory,
-                        InstructionSet isa,
-                        std::string* error_msg) {
-  std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
-
-  std::string input_image_location_arg("--input-image-location=");
-  input_image_location_arg += image_location;
-
-  std::string output_image_directory_arg("--output-image-directory=");
-  output_image_directory_arg += dest_directory;
-
-  std::string instruction_set_arg("--instruction-set=");
-  instruction_set_arg += GetInstructionSetString(isa);
-
-  std::vector<std::string> argv;
-  argv.push_back(patchoat);
-
-  argv.push_back(input_image_location_arg);
-  argv.push_back(output_image_directory_arg);
-
-  argv.push_back(instruction_set_arg);
-
-  argv.push_back("--verify");
-
-  std::string command_line(android::base::Join(argv, ' '));
-  LOG(INFO) << "VerifyImage: " << command_line;
-  return Exec(argv, error_msg);
-}
-
-static ImageHeader* ReadSpecificImageHeader(const char* filename, std::string* error_msg) {
+static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
+                                                            std::string* error_msg) {
   std::unique_ptr<ImageHeader> hdr(new ImageHeader);
   if (!ReadSpecificImageHeader(filename, hdr.get())) {
     *error_msg = StringPrintf("Unable to read image header for %s", filename);
     return nullptr;
   }
-  return hdr.release();
+  return hdr;
 }
 
-ImageHeader* ImageSpace::ReadImageHeader(const char* image_location,
-                                         const InstructionSet image_isa,
-                                         std::string* error_msg) {
+std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
+                                                         const InstructionSet image_isa,
+                                                         std::string* error_msg) {
   std::string system_filename;
   bool has_system = false;
   std::string cache_filename;
   bool has_cache = false;
   bool dalvik_cache_exists = false;
   bool is_global_cache = false;
-  if (FindImageFilename(image_location, image_isa, &system_filename, &has_system,
-                        &cache_filename, &dalvik_cache_exists, &has_cache, &is_global_cache)) {
-    if (Runtime::Current()->ShouldRelocate()) {
-      if (has_system && has_cache) {
-        std::unique_ptr<ImageHeader> sys_hdr(new ImageHeader);
-        std::unique_ptr<ImageHeader> cache_hdr(new ImageHeader);
-        if (!ReadSpecificImageHeader(system_filename.c_str(), sys_hdr.get())) {
-          *error_msg = StringPrintf("Unable to read image header for %s at %s",
-                                    image_location, system_filename.c_str());
-          return nullptr;
-        }
-        if (!ReadSpecificImageHeader(cache_filename.c_str(), cache_hdr.get())) {
-          *error_msg = StringPrintf("Unable to read image header for %s at %s",
-                                    image_location, cache_filename.c_str());
-          return nullptr;
-        }
-        if (sys_hdr->GetOatChecksum() != cache_hdr->GetOatChecksum()) {
-          *error_msg = StringPrintf("Unable to find a relocated version of image file %s",
-                                    image_location);
-          return nullptr;
-        }
-        return cache_hdr.release();
-      } else if (!has_cache) {
-        *error_msg = StringPrintf("Unable to find a relocated version of image file %s",
-                                  image_location);
-        return nullptr;
-      } else if (!has_system && has_cache) {
-        // This can probably just use the cache one.
-        return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
-      }
-    } else {
-      // We don't want to relocate, Just pick the appropriate one if we have it and return.
-      if (has_system && has_cache) {
-        // We want the cache if the checksum matches, otherwise the system.
-        std::unique_ptr<ImageHeader> system(ReadSpecificImageHeader(system_filename.c_str(),
-                                                                    error_msg));
-        std::unique_ptr<ImageHeader> cache(ReadSpecificImageHeader(cache_filename.c_str(),
-                                                                   error_msg));
-        if (system.get() == nullptr ||
-            (cache.get() != nullptr && cache->GetOatChecksum() == system->GetOatChecksum())) {
-          return cache.release();
-        } else {
-          return system.release();
-        }
-      } else if (has_system) {
-        return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
-      } else if (has_cache) {
-        return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
-      }
+  if (FindImageFilename(image_location,
+                        image_isa,
+                        &system_filename,
+                        &has_system,
+                        &cache_filename,
+                        &dalvik_cache_exists,
+                        &has_cache,
+                        &is_global_cache)) {
+    if (has_system) {
+      return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
+    } else if (has_cache) {
+      return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
     }
   }
 
@@ -439,6 +342,10 @@
     return address - source_ < length_;
   }
 
+  bool InDest(const void* dest) const {
+    return InDest(reinterpret_cast<uintptr_t>(dest));
+  }
+
   bool InDest(uintptr_t address) const {
     return address - dest_ < length_;
   }
@@ -479,27 +386,386 @@
             << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
 }
 
+template <PointerSize kPointerSize, typename HeapVisitor, typename NativeVisitor>
+class ImageSpace::PatchObjectVisitor final {
+ public:
+  explicit PatchObjectVisitor(HeapVisitor heap_visitor, NativeVisitor native_visitor)
+      : heap_visitor_(heap_visitor), native_visitor_(native_visitor) {}
+
+  void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // A mirror::Class object consists of
+    //  - instance fields inherited from j.l.Object,
+    //  - instance fields inherited from j.l.Class,
+    //  - embedded tables (vtable, interface method table),
+    //  - static fields of the class itself.
+    // The reference fields are at the start of each field section (this is how the
+    // ClassLinker orders fields; except when that would create a gap between superclass
+    // fields and the first reference of the subclass due to alignment, it can be filled
+    // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
+
+    DCHECK_ALIGNED(klass, kObjectAlignment);
+    static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
+    // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
+    // This should be the only reference field in j.l.Object and we assert that below.
+    PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
+    // Then patch the reference instance fields described by j.l.Class.class.
+    // Use the sizeof(Object) to determine where these reference fields start;
+    // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
+    // after patching but the j.l.Class may not have been patched yet.
+    mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+    size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
+    DCHECK_NE(num_reference_instance_fields, 0u);
+    static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
+    MemberOffset instance_field_offset(sizeof(mirror::Object));
+    for (size_t i = 0; i != num_reference_instance_fields; ++i) {
+      PatchReferenceField(klass, instance_field_offset);
+      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                    "Heap reference sizes equality check.");
+      instance_field_offset =
+          MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
+    }
+    // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
+    // we can get a reference to j.l.Object.class and assert that it has only one
+    // reference instance field (the `klass_` patched above).
+    if (kIsDebugBuild && klass == class_class) {
+      ObjPtr<mirror::Class> object_class =
+          klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+      CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
+    }
+    // Then patch static fields.
+    size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
+    if (num_reference_static_fields != 0u) {
+      MemberOffset static_field_offset =
+          klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
+      for (size_t i = 0; i != num_reference_static_fields; ++i) {
+        PatchReferenceField(klass, static_field_offset);
+        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                      "Heap reference sizes equality check.");
+        static_field_offset =
+            MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
+      }
+    }
+    // Then patch native pointers.
+    klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
+  }
+
+  template <typename T>
+  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
+    return (ptr != nullptr) ? native_visitor_(ptr) : nullptr;
+  }
+
+  void VisitPointerArray(mirror::PointerArray* pointer_array)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Fully patch the pointer array, including the `klass_` field.
+    PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
+
+    int32_t length = pointer_array->GetLength<kVerifyNone>();
+    for (int32_t i = 0; i != length; ++i) {
+      ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
+          pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
+      PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
+    }
+  }
+
+  void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Visit all reference fields.
+    object->VisitReferences</*kVisitNativeRoots=*/ false,
+                            kVerifyNone,
+                            kWithoutReadBarrier>(*this, *this);
+    // This function should not be called for classes.
+    DCHECK(!object->IsClass<kVerifyNone>());
+  }
+
+  // Visitor for VisitReferences().
+  ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
+      const REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!is_static);
+    PatchReferenceField(object, field_offset);
+  }
+  // Visitor for VisitReferences(), java.lang.ref.Reference case.
+  ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
+  }
+  // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+      const {}
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+  void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+    FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
+                                                   mirror::DexCache::StringsOffset(),
+                                                   dex_cache->NumStrings<kVerifyNone>());
+    FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
+                                                 mirror::DexCache::ResolvedTypesOffset(),
+                                                 dex_cache->NumResolvedTypes<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
+                                                   mirror::DexCache::ResolvedMethodsOffset(),
+                                                   dex_cache->NumResolvedMethods<kVerifyNone>());
+    FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
+                                                  mirror::DexCache::ResolvedFieldsOffset(),
+                                                  dex_cache->NumResolvedFields<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
+        dex_cache,
+        mirror::DexCache::ResolvedMethodTypesOffset(),
+        dex_cache->NumResolvedMethodTypes<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::CallSite>>(
+        dex_cache,
+        mirror::DexCache::ResolvedCallSitesOffset(),
+        dex_cache->NumResolvedCallSites<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::String>>(
+        dex_cache,
+        mirror::DexCache::PreResolvedStringsOffset(),
+        dex_cache->NumPreResolvedStrings<kVerifyNone>());
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
+    T* old_value = root->template Read<kWithoutReadBarrier>();
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      *root = GcRoot<T>(heap_visitor_(old_value));
+    }
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
+    if (kPointerSize == PointerSize::k64) {
+      uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
+      T* old_value = reinterpret_cast64<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = native_visitor_(old_value);
+        *raw_entry = reinterpret_cast64<uint64_t>(new_value);
+      }
+    } else {
+      uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
+      T* old_value = reinterpret_cast32<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = native_visitor_(old_value);
+        *raw_entry = reinterpret_cast32<uint32_t>(new_value);
+      }
+    }
+  }
+
+  template <bool kMayBeNull = true>
+  ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::Object* old_value =
+        object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      mirror::Object* new_value = heap_visitor_(old_value);
+      object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+                                                /*kCheckTransaction=*/ true,
+                                                kVerifyNone>(offset, new_value);
+    }
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
+                      sizeof(mirror::NativeDexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    mirror::NativeDexCachePair<T> pair =
+        mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
+    if (pair.object != nullptr) {
+      pair.object = native_visitor_(pair.object);
+      mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
+    }
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  template <typename EntryType>
+  void FixupDexCacheArray(mirror::DexCache* dex_cache,
+                          MemberOffset array_offset,
+                          uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
+    EntryType* old_array =
+        reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
+    DCHECK_EQ(old_array != nullptr, size != 0u);
+    if (old_array != nullptr) {
+      EntryType* new_array = native_visitor_(old_array);
+      dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
+      for (uint32_t i = 0; i != size; ++i) {
+        FixupDexCacheArrayEntry(new_array, i);
+      }
+    }
+  }
+
+ private:
+  // Heap objects visitor.
+  HeapVisitor heap_visitor_;
+
+  // Native objects visitor.
+  NativeVisitor native_visitor_;
+};
+
+template <typename ObjectVisitor>
+class ImageSpace::PatchArtFieldVisitor final : public ArtFieldVisitor {
+ public:
+  explicit PatchArtFieldVisitor(const ObjectVisitor& visitor) : visitor_(visitor) {}
+
+  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    visitor_.template PatchGcRoot</*kMayBeNull=*/ false>(&field->DeclaringClassRoot());
+  }
+
+ private:
+  const ObjectVisitor visitor_;
+};
+
+template <PointerSize kPointerSize, typename ObjectVisitor, typename CodeVisitor>
+class ImageSpace::PatchArtMethodVisitor final : public ArtMethodVisitor {
+ public:
+  explicit PatchArtMethodVisitor(const ObjectVisitor& object_visitor,
+                                 const CodeVisitor& code_visitor)
+      : object_visitor_(object_visitor),
+        code_visitor_(code_visitor) {}
+
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    object_visitor_.PatchGcRoot(&method->DeclaringClassRoot());
+    void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
+    object_visitor_.PatchNativePointer(data_address);
+    void** entrypoint_address =
+        PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
+    code_visitor_.PatchNativePointer(entrypoint_address);
+  }
+
+ private:
+  void** PointerAddress(ArtMethod* method, MemberOffset offset) {
+    return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
+  }
+
+  const ObjectVisitor object_visitor_;
+  const CodeVisitor code_visitor_;
+};
+
+template <typename ReferenceVisitor>
+class ImageSpace::ClassTableVisitor final {
+ public:
+  explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
+      : reference_visitor_(reference_visitor) {}
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(root->AsMirrorPtr() != nullptr);
+    root->Assign(reference_visitor_(root->AsMirrorPtr()));
+  }
+
+ private:
+  ReferenceVisitor reference_visitor_;
+};
+
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
 // nested class), but not declare functions in the header.
 class ImageSpace::Loader {
  public:
+  static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
+                                                  const char* image_location,
+                                                  const OatFile* oat_file,
+                                                  /*inout*/MemMap* image_reservation,
+                                                  /*out*/std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
+
+    std::unique_ptr<ImageSpace> space = Init(image_filename,
+                                             image_location,
+                                             oat_file,
+                                             &logger,
+                                             image_reservation,
+                                             error_msg);
+    if (space != nullptr) {
+      uint32_t expected_reservation_size =
+          RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
+      if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
+          !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
+        return nullptr;
+      }
+
+      TimingLogger::ScopedTiming timing("RelocateImage", &logger);
+      ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
+      const PointerSize pointer_size = image_header->GetPointerSize();
+      bool result;
+      if (pointer_size == PointerSize::k64) {
+        result = RelocateInPlace<PointerSize::k64>(*image_header,
+                                                   space->GetMemMap()->Begin(),
+                                                   space->GetLiveBitmap(),
+                                                   oat_file,
+                                                   error_msg);
+      } else {
+        result = RelocateInPlace<PointerSize::k32>(*image_header,
+                                                   space->GetMemMap()->Begin(),
+                                                   space->GetLiveBitmap(),
+                                                   oat_file,
+                                                   error_msg);
+      }
+      if (!result) {
+        return nullptr;
+      }
+      Runtime* runtime = Runtime::Current();
+      CHECK_EQ(runtime->GetResolutionMethod(),
+               image_header->GetImageMethod(ImageHeader::kResolutionMethod));
+      CHECK_EQ(runtime->GetImtConflictMethod(),
+               image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
+      CHECK_EQ(runtime->GetImtUnimplementedMethod(),
+               image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
+               image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
+               image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
+               image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
+               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
+               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
+      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
+               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
+
+      VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
+    }
+    if (VLOG_IS_ON(image)) {
+      logger.Dump(LOG_STREAM(INFO));
+    }
+    return space;
+  }
+
   static std::unique_ptr<ImageSpace> Init(const char* image_filename,
                                           const char* image_location,
-                                          bool validate_oat_file,
                                           const OatFile* oat_file,
+                                          TimingLogger* logger,
                                           /*inout*/MemMap* image_reservation,
-                                          /*inout*/MemMap* oat_reservation,
                                           /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(image_filename != nullptr);
     CHECK(image_location != nullptr);
 
-    TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image));
     VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
 
     std::unique_ptr<File> file;
     {
-      TimingLogger::ScopedTiming timing("OpenImageFile", &logger);
+      TimingLogger::ScopedTiming timing("OpenImageFile", logger);
       file.reset(OS::OpenFileForReading(image_filename));
       if (file == nullptr) {
         *error_msg = StringPrintf("Failed to open '%s'", image_filename);
@@ -509,7 +775,7 @@
     ImageHeader temp_image_header;
     ImageHeader* image_header = &temp_image_header;
     {
-      TimingLogger::ScopedTiming timing("ReadImageHeader", &logger);
+      TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
       bool success = file->ReadFully(image_header, sizeof(*image_header));
       if (!success || !image_header->IsValid()) {
         *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
@@ -519,15 +785,16 @@
     // Check that the file is larger or equal to the header size + data size.
     const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
     if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
-      *error_msg = StringPrintf("Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
-                                image_file_size,
-                                sizeof(ImageHeader) + image_header->GetDataSize());
+      *error_msg = StringPrintf(
+          "Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
+           image_file_size,
+           static_cast<uint64_t>(sizeof(ImageHeader) + image_header->GetDataSize()));
       return nullptr;
     }
 
     if (oat_file != nullptr) {
-      // If we have an oat file, check the oat file checksum. The oat file is only non-null for the
-      // app image case. Otherwise, we open the oat file after the image and check the checksum there.
+      // If we have an oat file (i.e. for app image), check the oat file checksum.
+      // Otherwise, we open the oat file after the image and check the checksum there.
       const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
       const uint32_t image_oat_checksum = image_header->GetOatChecksum();
       if (oat_checksum != image_oat_checksum) {
@@ -556,54 +823,28 @@
     const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
                                                kPageSize);
     const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
-    const ImageSection& relocations_section = image_header->GetImageRelocationsSection();
-    if (relocations_section.Offset() != bitmap_section.Offset() + bitmap_section.Size()) {
+    if (end_of_bitmap != image_file_size) {
       *error_msg = StringPrintf(
-          "Relocations do not start immediately after bitmap: %u vs. %u + %u.",
-          relocations_section.Offset(),
-          bitmap_section.Offset(),
-          bitmap_section.Size());
-      return nullptr;
-    }
-    const size_t end_of_relocations = end_of_bitmap + relocations_section.Size();
-    if (end_of_relocations != image_file_size) {
-      *error_msg = StringPrintf(
-          "Image file size does not equal end of relocations: size=%" PRIu64 " vs. %zu.",
+          "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
           image_file_size,
-          end_of_relocations);
+          end_of_bitmap);
       return nullptr;
     }
 
-    MemMap map;
-
     // GetImageBegin is the preferred address to map the image. If we manage to map the
     // image at the image begin, the amount of fixup work required is minimized.
-    // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
+    // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
     // avoid reading proc maps for a mapping failure and slowing everything down.
     // For the boot image, we have already reserved the memory and we load the image
     // into the `image_reservation`.
-    map = LoadImageFile(
+    MemMap map = LoadImageFile(
         image_filename,
         image_location,
         *image_header,
-        image_header->GetImageBegin(),
         file->Fd(),
         logger,
         image_reservation,
-        (image_reservation == nullptr && image_header->IsPic()) ? nullptr : error_msg);
-    // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
-    // relocate in-place.
-    if (!map.IsValid() && image_reservation == nullptr && image_header->IsPic()) {
-      map = LoadImageFile(image_filename,
-                          image_location,
-                          *image_header,
-                          /* address */ nullptr,
-                          file->Fd(),
-                          logger,
-                          /* image_reservation */ nullptr,
-                          error_msg);
-    }
-    // Were we able to load something and continue?
+        error_msg);
     if (!map.IsValid()) {
       DCHECK(!error_msg->empty());
       return nullptr;
@@ -611,10 +852,11 @@
     DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
 
     MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
-                                              PROT_READ, MAP_PRIVATE,
+                                              PROT_READ,
+                                              MAP_PRIVATE,
                                               file->Fd(),
                                               image_bitmap_offset,
-                                              /* low_4gb */ false,
+                                              /*low_4gb=*/ false,
                                               image_filename,
                                               error_msg);
     if (!image_bitmap_map.IsValid()) {
@@ -624,7 +866,7 @@
     // Loaded the map, use the image header from the file now in case we patch it with
     // RelocateInPlace.
     image_header = reinterpret_cast<ImageHeader*>(map.Begin());
-    const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
+    const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                          image_filename,
                                          bitmap_index));
@@ -634,7 +876,7 @@
     uint8_t* const image_end = map.Begin() + image_objects.End();
     std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
     {
-      TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
+      TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
       bitmap.reset(
           accounting::ContinuousSpaceBitmap::CreateFromMemMap(
               bitmap_name,
@@ -647,158 +889,85 @@
         return nullptr;
       }
     }
-    {
-      TimingLogger::ScopedTiming timing("RelocateImage", &logger);
-      if (!RelocateInPlace(*image_header,
-                           map.Begin(),
-                           bitmap.get(),
-                           oat_file,
-                           error_msg)) {
-        return nullptr;
-      }
-    }
     // We only want the mirror object, not the ArtFields and ArtMethods.
     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                      image_location,
                                                      std::move(map),
                                                      std::move(bitmap),
                                                      image_end));
-
-    // VerifyImageAllocations() will be called later in Runtime::Init()
-    // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
-    // and ArtField::java_lang_reflect_ArtField_, which are used from
-    // Object::SizeOf() which VerifyImageAllocations() calls, are not
-    // set yet at this point.
-    if (oat_file == nullptr) {
-      TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
-      space->oat_file_ = OpenOatFile(*space, image_filename, oat_reservation, error_msg);
-      if (space->oat_file_ == nullptr) {
-        DCHECK(!error_msg->empty());
-        return nullptr;
-      }
-      space->oat_file_non_owned_ = space->oat_file_.get();
-    } else {
-      space->oat_file_non_owned_ = oat_file;
-    }
-
-    if (validate_oat_file) {
-      TimingLogger::ScopedTiming timing("ValidateOatFile", &logger);
-      CHECK(space->oat_file_ != nullptr);
-      if (!ImageSpace::ValidateOatFile(*space->oat_file_, error_msg)) {
-        DCHECK(!error_msg->empty());
-        return nullptr;
-      }
-    }
-
-    Runtime* runtime = Runtime::Current();
-
-    // If oat_file is null, then it is the boot image space. Use oat_file_non_owned_ from the space
-    // to set the runtime methods.
-    CHECK_EQ(oat_file != nullptr, image_header->IsAppImage());
-    if (image_header->IsAppImage()) {
-      CHECK_EQ(runtime->GetResolutionMethod(),
-               image_header->GetImageMethod(ImageHeader::kResolutionMethod));
-      CHECK_EQ(runtime->GetImtConflictMethod(),
-               image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
-      CHECK_EQ(runtime->GetImtUnimplementedMethod(),
-               image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
-               image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
-               image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
-               image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
-    } else if (!runtime->HasResolutionMethod()) {
-      runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
-      runtime->SetResolutionMethod(image_header->GetImageMethod(ImageHeader::kResolutionMethod));
-      runtime->SetImtConflictMethod(image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
-      runtime->SetImtUnimplementedMethod(
-          image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
-          CalleeSaveType::kSaveAllCalleeSaves);
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod),
-          CalleeSaveType::kSaveRefsOnly);
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
-          CalleeSaveType::kSaveRefsAndArgs);
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod),
-          CalleeSaveType::kSaveEverything);
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit),
-          CalleeSaveType::kSaveEverythingForClinit);
-      runtime->SetCalleeSaveMethod(
-          image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck),
-          CalleeSaveType::kSaveEverythingForSuspendCheck);
-    }
-
-    VLOG(image) << "ImageSpace::Init exiting " << *space.get();
-    if (VLOG_IS_ON(image)) {
-      logger.Dump(LOG_STREAM(INFO));
-    }
+    space->oat_file_non_owned_ = oat_file;
     return space;
   }
 
+  static bool CheckImageComponentCount(const ImageSpace& space,
+                                       uint32_t expected_component_count,
+                                       /*out*/std::string* error_msg) {
+    const ImageHeader& header = space.GetImageHeader();
+    if (header.GetComponentCount() != expected_component_count) {
+      *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %u",
+                                space.GetImageFilename().c_str(),
+                                header.GetComponentCount(),
+                                expected_component_count);
+      return false;
+    }
+    return true;
+  }
+
+  static bool CheckImageReservationSize(const ImageSpace& space,
+                                        uint32_t expected_reservation_size,
+                                        /*out*/std::string* error_msg) {
+    const ImageHeader& header = space.GetImageHeader();
+    if (header.GetImageReservationSize() != expected_reservation_size) {
+      *error_msg = StringPrintf("Unexpected reservation size in %s, received %u, expected %u",
+                                space.GetImageFilename().c_str(),
+                                header.GetImageReservationSize(),
+                                expected_reservation_size);
+      return false;
+    }
+    return true;
+  }
+
  private:
   static MemMap LoadImageFile(const char* image_filename,
                               const char* image_location,
                               const ImageHeader& image_header,
-                              uint8_t* address,
                               int fd,
-                              TimingLogger& logger,
+                              TimingLogger* logger,
                               /*inout*/MemMap* image_reservation,
                               /*out*/std::string* error_msg) {
-    TimingLogger::ScopedTiming timing("MapImageFile", &logger);
-    const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
-    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+    TimingLogger::ScopedTiming timing("MapImageFile", logger);
+    std::string temp_error_msg;
+    const bool is_compressed = image_header.HasCompressedBlock();
+    if (!is_compressed) {
+      uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
       return MemMap::MapFileAtAddress(address,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE,
                                       fd,
-                                      /* start */ 0,
-                                      /* low_4gb */ true,
+                                      /*start=*/ 0,
+                                      /*low_4gb=*/ true,
                                       image_filename,
-                                      /* reuse */ false,
+                                      /*reuse=*/ false,
                                       image_reservation,
                                       error_msg);
     }
 
-    if (storage_mode != ImageHeader::kStorageModeLZ4 &&
-        storage_mode != ImageHeader::kStorageModeLZ4HC) {
-      if (error_msg != nullptr) {
-        *error_msg = StringPrintf("Invalid storage mode in image header %d",
-                                  static_cast<int>(storage_mode));
-      }
-      return MemMap::Invalid();
-    }
-
     // Reserve output and decompress into it.
     MemMap map = MemMap::MapAnonymous(image_location,
-                                      address,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb */ true,
-                                      /* reuse */ false,
+                                      /*low_4gb=*/ true,
                                       image_reservation,
                                       error_msg);
     if (map.IsValid()) {
       const size_t stored_size = image_header.GetDataSize();
-      const size_t decompress_offset = sizeof(ImageHeader);  // Skip the header.
       MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         fd,
-                                        /* offset */ 0,
-                                        /* low_4gb */ false,
+                                        /*start=*/ 0,
+                                        /*low_4gb=*/ false,
                                         image_filename,
                                         error_msg);
       if (!temp_map.IsValid()) {
@@ -806,102 +975,91 @@
         return MemMap::Invalid();
       }
       memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+
+      Runtime::ScopedThreadPoolUsage stpu;
+      ThreadPool* const pool = stpu.GetThreadPool();
       const uint64_t start = NanoTime();
-      // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
-      TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
-      const size_t decompressed_size = LZ4_decompress_safe(
-          reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
-          reinterpret_cast<char*>(map.Begin()) + decompress_offset,
-          stored_size,
-          map.Size() - decompress_offset);
+      Thread* const self = Thread::Current();
+      static constexpr size_t kMinBlocks = 2u;
+      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
+      for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
+        auto function = [&](Thread*) {
+          const uint64_t start2 = NanoTime();
+          ScopedTrace trace("LZ4 decompress block");
+          bool result = block.Decompress(/*out_ptr=*/map.Begin(),
+                                         /*in_ptr=*/temp_map.Begin(),
+                                         error_msg);
+          if (!result && error_msg != nullptr) {
+            *error_msg = "Failed to decompress image block " + *error_msg;
+          }
+          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
+                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
+        };
+        if (use_parallel) {
+          pool->AddTask(self, new FunctionTask(std::move(function)));
+        } else {
+          function(self);
+        }
+      }
+      if (use_parallel) {
+        ScopedTrace trace("Waiting for workers");
+        pool->Wait(self, true, false);
+      }
       const uint64_t time = NanoTime() - start;
       // Add one 1 ns to prevent possible divide by 0.
       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
                   << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
                   << "/s)";
-      if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
-        if (error_msg != nullptr) {
-          *error_msg = StringPrintf(
-              "Decompressed size does not match expected image size %zu vs %zu",
-              decompressed_size + sizeof(ImageHeader),
-              image_header.GetImageSize());
-        }
-        return MemMap::Invalid();
-      }
     }
 
     return map;
   }
 
-  class FixupVisitor : public ValueObject {
+  class EmptyRange {
    public:
-    FixupVisitor(const RelocationRange& boot_image,
-                 const RelocationRange& boot_oat,
-                 const RelocationRange& app_image,
-                 const RelocationRange& app_oat)
-        : boot_image_(boot_image),
-          boot_oat_(boot_oat),
-          app_image_(app_image),
-          app_oat_(app_oat) {}
+    ALWAYS_INLINE bool InSource(uintptr_t) const { return false; }
+    ALWAYS_INLINE bool InDest(uintptr_t) const { return false; }
+    ALWAYS_INLINE uintptr_t ToDest(uintptr_t) const { UNREACHABLE(); }
+  };
+
+  template <typename Range0, typename Range1 = EmptyRange, typename Range2 = EmptyRange>
+  class ForwardAddress {
+   public:
+    ForwardAddress(const Range0& range0 = Range0(),
+                   const Range1& range1 = Range1(),
+                   const Range2& range2 = Range2())
+        : range0_(range0), range1_(range1), range2_(range2) {}
 
     // Return the relocated address of a heap object.
+    // Null checks must be performed in the caller (for performance reasons).
     template <typename T>
-    ALWAYS_INLINE T* ForwardObject(T* src) const {
+    ALWAYS_INLINE T* operator()(T* src) const {
+      DCHECK(src != nullptr);
       const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
-      if (boot_image_.InSource(uint_src)) {
-        return reinterpret_cast<T*>(boot_image_.ToDest(uint_src));
+      if (range2_.InSource(uint_src)) {
+        return reinterpret_cast<T*>(range2_.ToDest(uint_src));
       }
-      if (app_image_.InSource(uint_src)) {
-        return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
+      if (range1_.InSource(uint_src)) {
+        return reinterpret_cast<T*>(range1_.ToDest(uint_src));
       }
-      // Since we are fixing up the app image, there should only be pointers to the app image and
-      // boot image.
-      DCHECK(src == nullptr) << reinterpret_cast<const void*>(src);
-      return src;
+      CHECK(range0_.InSource(uint_src))
+          << reinterpret_cast<const void*>(src) << " not in "
+          << reinterpret_cast<const void*>(range0_.Source()) << "-"
+          << reinterpret_cast<const void*>(range0_.Source() + range0_.Length());
+      return reinterpret_cast<T*>(range0_.ToDest(uint_src));
     }
 
-    // Return the relocated address of a code pointer (contained by an oat file).
-    ALWAYS_INLINE const void* ForwardCode(const void* src) const {
-      const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
-      if (boot_oat_.InSource(uint_src)) {
-        return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
-      }
-      if (app_oat_.InSource(uint_src)) {
-        return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
-      }
-      DCHECK(src == nullptr) << src;
-      return src;
-    }
-
-    // Must be called on pointers that already have been relocated to the destination relocation.
-    ALWAYS_INLINE bool IsInAppImage(mirror::Object* object) const {
-      return app_image_.InDest(reinterpret_cast<uintptr_t>(object));
-    }
-
-   protected:
-    // Source section.
-    const RelocationRange boot_image_;
-    const RelocationRange boot_oat_;
-    const RelocationRange app_image_;
-    const RelocationRange app_oat_;
+   private:
+    const Range0 range0_;
+    const Range1 range1_;
+    const Range2 range2_;
   };
 
-  // Adapt for mirror::Class::FixupNativePointers.
-  class FixupObjectAdapter : public FixupVisitor {
+  template <typename Forward>
+  class FixupRootVisitor {
    public:
     template<typename... Args>
-    explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
-
-    template <typename T>
-    T* operator()(T* obj, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
-      return ForwardObject(obj);
-    }
-  };
-
-  class FixupRootVisitor : public FixupVisitor {
-   public:
-    template<typename... Args>
-    explicit FixupRootVisitor(Args... args) : FixupVisitor(args...) {}
+    explicit FixupRootVisitor(Args... args) : forward_(args...) {}
 
     ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
         REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -913,22 +1071,22 @@
     ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
         REQUIRES_SHARED(Locks::mutator_lock_) {
       mirror::Object* ref = root->AsMirrorPtr();
-      mirror::Object* new_ref = ForwardObject(ref);
+      mirror::Object* new_ref = forward_(ref);
       if (ref != new_ref) {
         root->Assign(new_ref);
       }
     }
+
+   private:
+    Forward forward_;
   };
 
-  class FixupObjectVisitor : public FixupVisitor {
+  template <typename Forward>
+  class FixupObjectVisitor {
    public:
-    template<typename... Args>
     explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
-                                const PointerSize pointer_size,
-                                Args... args)
-        : FixupVisitor(args...),
-          pointer_size_(pointer_size),
-          visited_(visited) {}
+                                const Forward& forward)
+        : visited_(visited), forward_(forward) {}
 
     // Fix up separately since we also need to fix up method entrypoints.
     ALWAYS_INLINE void VisitRootIfNonNull(
@@ -941,35 +1099,13 @@
                                   MemberOffset offset,
                                   bool is_static ATTRIBUTE_UNUSED) const
         NO_THREAD_SAFETY_ANALYSIS {
-      // There could be overlap between ranges, we must avoid visiting the same reference twice.
-      // Avoid the class field since we already fixed it up in FixupClassVisitor.
-      if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
-        // Space is not yet added to the heap, don't do a read barrier.
-        mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
-            offset);
+      // Space is not yet added to the heap, don't do a read barrier.
+      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+          offset);
+      if (ref != nullptr) {
         // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
         // image.
-        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
-      }
-    }
-
-    // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
-    // boot image. Uses the bitmap to ensure the same array is not visited multiple times.
-    template <typename Visitor>
-    void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const
-        NO_THREAD_SAFETY_ANALYSIS {
-      DCHECK(array != nullptr);
-      DCHECK(visitor.IsInAppImage(array));
-      // The bit for the array contents is different than the bit for the array. Since we may have
-      // already visited the array as a long / int array from walking the bitmap without knowing it
-      // was a pointer array.
-      static_assert(kObjectAlignment == 8u, "array bit may be in another object");
-      mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>(
-          reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
-      // If the bit is not set then the contents have not yet been updated.
-      if (!visited_->Test(contents_bit)) {
-        array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, pointer_size_, visitor);
-        visited_->Set(contents_bit);
+        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, forward_(ref));
       }
     }
 
@@ -978,189 +1114,98 @@
                     ObjPtr<mirror::Reference> ref) const
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
       mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
-      ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
-          mirror::Reference::ReferentOffset(),
-          ForwardObject(obj));
+      if (obj != nullptr) {
+        ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+            mirror::Reference::ReferentOffset(),
+            forward_(obj));
+      }
     }
 
     void operator()(mirror::Object* obj) const
         NO_THREAD_SAFETY_ANALYSIS {
-      if (visited_->Test(obj)) {
-        // Already visited.
-        return;
-      }
-      visited_->Set(obj);
-
-      // Handle class specially first since we need it to be updated to properly visit the rest of
-      // the instance fields.
-      {
-        mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
-        DCHECK(klass != nullptr) << "Null class in image";
-        // No AsClass since our fields aren't quite fixed up yet.
-        mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
-        if (klass != new_klass) {
-          obj->SetClass<kVerifyNone>(new_klass);
-        }
-        if (new_klass != klass && IsInAppImage(new_klass)) {
-          // Make sure the klass contents are fixed up since we depend on it to walk the fields.
-          operator()(new_klass);
-        }
-      }
-
-      if (obj->IsClass()) {
-        mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
-        // Fixup super class before visiting instance fields which require
-        // information from their super class to calculate offsets.
-        mirror::Class* super_class = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
-        if (super_class != nullptr) {
-          mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class));
-          if (new_super_class != super_class && IsInAppImage(new_super_class)) {
-            // Recursively fix all dependencies.
-            operator()(new_super_class);
-          }
-        }
-      }
-
-      obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
-          *this,
-          *this);
-      // Note that this code relies on no circular dependencies.
-      // We want to use our own class loader and not the one in the image.
-      if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
-        mirror::Class* as_klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
-        FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
-        as_klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(as_klass,
-                                                                        pointer_size_,
-                                                                        visitor);
-        // Deal with the pointer arrays. Use the helper function since multiple classes can reference
-        // the same arrays.
-        mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
-        if (vtable != nullptr && IsInAppImage(vtable)) {
-          operator()(vtable);
-          UpdatePointerArrayContents(vtable, visitor);
-        }
-        mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
-        // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
-        // contents.
-        if (IsInAppImage(iftable)) {
-          operator()(iftable);
-          for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
-            if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
-              mirror::PointerArray* methods =
-                  iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
-              if (visitor.IsInAppImage(methods)) {
-                operator()(methods);
-                DCHECK(methods != nullptr);
-                UpdatePointerArrayContents(methods, visitor);
-              }
-            }
-          }
-        }
+      if (!visited_->Set(obj)) {
+        // Not already visited.
+        obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
+            *this,
+            *this);
+        CHECK(!obj->IsClass());
       }
     }
 
    private:
-    const PointerSize pointer_size_;
     gc::accounting::ContinuousSpaceBitmap* const visited_;
+    Forward forward_;
   };
 
-  class ForwardObjectAdapter {
-   public:
-    ALWAYS_INLINE explicit ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
-
-    template <typename T>
-    ALWAYS_INLINE T* operator()(T* src) const {
-      return visitor_->ForwardObject(src);
-    }
-
-   private:
-    const FixupVisitor* const visitor_;
-  };
-
-  class ForwardCodeAdapter {
-   public:
-    ALWAYS_INLINE explicit ForwardCodeAdapter(const FixupVisitor* visitor)
-        : visitor_(visitor) {}
-
-    template <typename T>
-    ALWAYS_INLINE T* operator()(T* src) const {
-      return visitor_->ForwardCode(src);
-    }
-
-   private:
-    const FixupVisitor* const visitor_;
-  };
-
-  class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
+  template <typename ForwardObject, typename ForwardNative, typename ForwardCode>
+  class FixupArtMethodVisitor : public ArtMethodVisitor {
    public:
     template<typename... Args>
-    explicit FixupArtMethodVisitor(bool fixup_heap_objects, PointerSize pointer_size, Args... args)
-        : FixupVisitor(args...),
-          fixup_heap_objects_(fixup_heap_objects),
-          pointer_size_(pointer_size) {}
+    explicit FixupArtMethodVisitor(PointerSize pointer_size,
+                                   const ForwardObject& forward_object,
+                                   const ForwardNative& forward_native,
+                                   const ForwardCode& forward_code)
+        : pointer_size_(pointer_size),
+          forward_object_(forward_object),
+          forward_native_(forward_native),
+          forward_code_(forward_code) {}
 
-    virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
+    void Visit(ArtMethod* method) override NO_THREAD_SAFETY_ANALYSIS {
       // TODO: Separate visitor for runtime vs normal methods.
       if (UNLIKELY(method->IsRuntimeMethod())) {
         ImtConflictTable* table = method->GetImtConflictTable(pointer_size_);
         if (table != nullptr) {
-          ImtConflictTable* new_table = ForwardObject(table);
+          ImtConflictTable* new_table = forward_native_(table);
           if (table != new_table) {
             method->SetImtConflictTable(new_table, pointer_size_);
           }
         }
         const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
-        const void* new_code = ForwardCode(old_code);
+        const void* new_code = forward_code_(old_code);
         if (old_code != new_code) {
           method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_);
         }
       } else {
-        if (fixup_heap_objects_) {
-          method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this));
-        }
-        method->UpdateEntrypoints(ForwardCodeAdapter(this), pointer_size_);
+        method->UpdateObjectsForImageRelocation(forward_object_);
+        method->UpdateEntrypoints(forward_code_, pointer_size_);
       }
     }
 
    private:
-    const bool fixup_heap_objects_;
     const PointerSize pointer_size_;
+    const ForwardObject forward_object_;
+    const ForwardNative forward_native_;
+    const ForwardCode forward_code_;
   };
 
-  class FixupArtFieldVisitor : public FixupVisitor, public ArtFieldVisitor {
+  template <typename Forward>
+  class FixupArtFieldVisitor : public ArtFieldVisitor {
    public:
-    template<typename... Args>
-    explicit FixupArtFieldVisitor(Args... args) : FixupVisitor(args...) {}
+    explicit FixupArtFieldVisitor(Forward forward) : forward_(forward) {}
 
-    virtual void Visit(ArtField* field) NO_THREAD_SAFETY_ANALYSIS {
-      field->UpdateObjects(ForwardObjectAdapter(this));
+    void Visit(ArtField* field) override NO_THREAD_SAFETY_ANALYSIS {
+      field->UpdateObjects(forward_);
     }
+
+   private:
+    Forward forward_;
   };
 
   // Relocate an image space mapped at target_base which possibly used to be at a different base
-  // address. Only needs a single image space, not one for both source and destination.
-  // In place means modifying a single ImageSpace in place rather than relocating from one ImageSpace
-  // to another.
+  // address. In place means modifying a single ImageSpace in place rather than relocating from
+  // one ImageSpace to another.
+  template <PointerSize kPointerSize>
   static bool RelocateInPlace(ImageHeader& image_header,
                               uint8_t* target_base,
                               accounting::ContinuousSpaceBitmap* bitmap,
                               const OatFile* app_oat_file,
                               std::string* error_msg) {
     DCHECK(error_msg != nullptr);
-    if (!image_header.IsPic()) {
-      if (image_header.GetImageBegin() == target_base) {
-        return true;
-      }
-      *error_msg = StringPrintf("Cannot relocate non-pic image for oat file %s",
-                                (app_oat_file != nullptr) ? app_oat_file->GetLocation().c_str() : "");
-      return false;
-    }
     // Set up sections.
     uint32_t boot_image_begin = 0;
     uint32_t boot_image_end = 0;
     uint32_t boot_oat_begin = 0;
     uint32_t boot_oat_end = 0;
-    const PointerSize pointer_size = image_header.GetPointerSize();
     gc::Heap* const heap = Runtime::Current()->GetHeap();
     heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
     if (boot_image_begin == boot_image_end) {
@@ -1171,10 +1216,8 @@
       *error_msg = "Can not relocate app image without boot oat file";
       return false;
     }
-    const uint32_t boot_image_size = boot_image_end - boot_image_begin;
-    const uint32_t boot_oat_size = boot_oat_end - boot_oat_begin;
+    const uint32_t boot_image_size = boot_oat_end - boot_image_begin;
     const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
-    const uint32_t image_header_boot_oat_size = image_header.GetBootOatSize();
     if (boot_image_size != image_header_boot_image_size) {
       *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
                                     PRIu64,
@@ -1182,45 +1225,49 @@
                                 static_cast<uint64_t>(image_header_boot_image_size));
       return false;
     }
-    if (boot_oat_size != image_header_boot_oat_size) {
-      *error_msg = StringPrintf("Boot oat size %" PRIu64 " does not match expected size %"
-                                    PRIu64,
-                                static_cast<uint64_t>(boot_oat_size),
-                                static_cast<uint64_t>(image_header_boot_oat_size));
-      return false;
-    }
+    const ImageSection& objects_section = image_header.GetObjectsSection();
+    // Where the app image objects are mapped to.
+    uint8_t* objects_location = target_base + objects_section.Offset();
     TimingLogger logger(__FUNCTION__, true, false);
     RelocationRange boot_image(image_header.GetBootImageBegin(),
                                boot_image_begin,
                                boot_image_size);
-    RelocationRange boot_oat(image_header.GetBootOatBegin(),
-                             boot_oat_begin,
-                             boot_oat_size);
-    RelocationRange app_image(reinterpret_cast<uintptr_t>(image_header.GetImageBegin()),
-                              reinterpret_cast<uintptr_t>(target_base),
-                              image_header.GetImageSize());
+    // Metadata is everything after the objects section, use exclusion to be safe.
+    RelocationRange app_image_metadata(
+        reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.End(),
+        reinterpret_cast<uintptr_t>(target_base) + objects_section.End(),
+        image_header.GetImageSize() - objects_section.End());
+    // App image heap objects, may be mapped in the heap.
+    RelocationRange app_image_objects(
+        reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.Offset(),
+        reinterpret_cast<uintptr_t>(objects_location),
+        objects_section.Size());
     // Use the oat data section since this is where the OatFile::Begin is.
     RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
                             // Not necessarily in low 4GB.
                             reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
                             image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
-    VLOG(image) << "App image " << app_image;
+    VLOG(image) << "App image metadata " << app_image_metadata;
+    VLOG(image) << "App image objects " << app_image_objects;
     VLOG(image) << "App oat " << app_oat;
     VLOG(image) << "Boot image " << boot_image;
-    VLOG(image) << "Boot oat " << boot_oat;
-    // True if we need to fixup any heap pointers, otherwise only code pointers.
-    const bool fixup_image = boot_image.Delta() != 0 || app_image.Delta() != 0;
-    const bool fixup_code = boot_oat.Delta() != 0 || app_oat.Delta() != 0;
-    if (!fixup_image && !fixup_code) {
+    // True if we need to fixup any heap pointers.
+    const bool fixup_image = boot_image.Delta() != 0 || app_image_metadata.Delta() != 0 ||
+        app_image_objects.Delta() != 0;
+    if (!fixup_image) {
       // Nothing to fix up.
       return true;
     }
     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
-    // Need to update the image to be at the target base.
-    const ImageSection& objects_section = image_header.GetObjectsSection();
-    uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
-    uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-    FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
+
+    using ForwardObject = ForwardAddress<RelocationRange, RelocationRange>;
+    ForwardObject forward_object(boot_image, app_image_objects);
+    ForwardObject forward_metadata(boot_image, app_image_metadata);
+    using ForwardCode = ForwardAddress<RelocationRange, RelocationRange>;
+    ForwardCode forward_code(boot_image, app_oat);
+    PatchObjectVisitor<kPointerSize, ForwardObject, ForwardCode> patch_object_visitor(
+        forward_object,
+        forward_metadata);
     if (fixup_image) {
       // Two pass approach, fix up all classes first, then fix up non class-objects.
       // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1228,135 +1275,119 @@
           gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
                                                         target_base,
                                                         image_header.GetImageSize()));
-      FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
-                                              pointer_size,
-                                              boot_image,
-                                              boot_oat,
-                                              app_image,
-                                              app_oat);
-      TimingLogger::ScopedTiming timing("Fixup classes", &logger);
-      // Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
-      // its probably not required.
+      {
+        TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+        const auto& class_table_section = image_header.GetClassTableSection();
+        if (class_table_section.Size() > 0u) {
+          ScopedObjectAccess soa(Thread::Current());
+          ClassTableVisitor class_table_visitor(forward_object);
+          size_t read_count = 0u;
+          const uint8_t* data = target_base + class_table_section.Offset();
+          // We avoid making a copy of the data since we want modifications to be propagated to the
+          // memory map.
+          ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+          for (ClassTable::TableSlot& slot : temp_set) {
+            slot.VisitRoot(class_table_visitor);
+            mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+            if (!app_image_objects.InDest(klass)) {
+              continue;
+            }
+            const bool already_marked = visited_bitmap->Set(klass);
+            CHECK(!already_marked) << "App image class already visited";
+            patch_object_visitor.VisitClass(klass);
+            // Then patch the non-embedded vtable and iftable.
+            mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+            if (vtable != nullptr &&
+                app_image_objects.InDest(vtable) &&
+                !visited_bitmap->Set(vtable)) {
+              patch_object_visitor.VisitPointerArray(vtable);
+            }
+            auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+            if (iftable != nullptr && app_image_objects.InDest(iftable)) {
+              // Avoid processing the fields of iftable since we will process them later anyways
+              // below.
+              int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+              for (int32_t i = 0; i != ifcount; ++i) {
+                mirror::PointerArray* unpatched_ifarray =
+                    iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+                if (unpatched_ifarray != nullptr) {
+                  // The iftable has not been patched, so we need to explicitly adjust the pointer.
+                  mirror::PointerArray* ifarray = forward_object(unpatched_ifarray);
+                  if (app_image_objects.InDest(ifarray) && !visited_bitmap->Set(ifarray)) {
+                    patch_object_visitor.VisitPointerArray(ifarray);
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+
+      // Fixup objects may read fields in the boot image, use the mutator lock here for sanity.
+      // Though its probably not required.
+      TimingLogger::ScopedTiming timing("Fixup objects", &logger);
       ScopedObjectAccess soa(Thread::Current());
-      timing.NewTiming("Fixup objects");
+      // Need to update the image to be at the target base.
+      uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+      uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+      FixupObjectVisitor<ForwardObject> fixup_object_visitor(visited_bitmap.get(), forward_object);
       bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
       // Fixup image roots.
-      CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
+      CHECK(app_image_objects.InSource(reinterpret_cast<uintptr_t>(
           image_header.GetImageRoots<kWithoutReadBarrier>().Ptr())));
-      image_header.RelocateImageObjects(app_image.Delta());
+      image_header.RelocateImageObjects(app_image_objects.Delta());
       CHECK_EQ(image_header.GetImageBegin(), target_base);
       // Fix up dex cache DexFile pointers.
       auto* dex_caches = image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)->
-          AsObjectArray<mirror::DexCache, kVerifyNone, kWithoutReadBarrier>();
+          AsObjectArray<mirror::DexCache, kVerifyNone>();
       for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
         mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
-        // Fix up dex cache pointers.
-        mirror::StringDexCacheType* strings = dex_cache->GetStrings();
-        if (strings != nullptr) {
-          mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings);
-          if (strings != new_strings) {
-            dex_cache->SetStrings(new_strings);
-          }
-          dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
-        }
-        mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
-        if (types != nullptr) {
-          mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
-          if (types != new_types) {
-            dex_cache->SetResolvedTypes(new_types);
-          }
-          dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
-        }
-        mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
-        if (methods != nullptr) {
-          mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods);
-          if (methods != new_methods) {
-            dex_cache->SetResolvedMethods(new_methods);
-          }
-          for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
-            auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size);
-            ArtMethod* orig = pair.object;
-            ArtMethod* copy = fixup_adapter.ForwardObject(orig);
-            if (orig != copy) {
-              pair.object = copy;
-              mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size);
-            }
-          }
-        }
-        mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
-        if (fields != nullptr) {
-          mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
-          if (fields != new_fields) {
-            dex_cache->SetResolvedFields(new_fields);
-          }
-          for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
-            mirror::FieldDexCachePair orig =
-                mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
-            mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
-            if (orig.object != copy.object) {
-              mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
-            }
-          }
-        }
-
-        mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes();
-        if (method_types != nullptr) {
-          mirror::MethodTypeDexCacheType* new_method_types =
-              fixup_adapter.ForwardObject(method_types);
-          if (method_types != new_method_types) {
-            dex_cache->SetResolvedMethodTypes(new_method_types);
-          }
-          dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter);
-        }
-        GcRoot<mirror::CallSite>* call_sites = dex_cache->GetResolvedCallSites();
-        if (call_sites != nullptr) {
-          GcRoot<mirror::CallSite>* new_call_sites = fixup_adapter.ForwardObject(call_sites);
-          if (call_sites != new_call_sites) {
-            dex_cache->SetResolvedCallSites(new_call_sites);
-          }
-          dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
-        }
+        CHECK(dex_cache != nullptr);
+        patch_object_visitor.VisitDexCacheArrays(dex_cache);
       }
     }
     {
       // Only touches objects in the app image, no need for mutator lock.
       TimingLogger::ScopedTiming timing("Fixup methods", &logger);
-      FixupArtMethodVisitor method_visitor(fixup_image,
-                                           pointer_size,
-                                           boot_image,
-                                           boot_oat,
-                                           app_image,
-                                           app_oat);
-      image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
+      FixupArtMethodVisitor method_visitor(kPointerSize,
+                                           forward_object,
+                                           forward_metadata,
+                                           forward_code);
+      image_header.VisitPackedArtMethods(&method_visitor, target_base, kPointerSize);
     }
     if (fixup_image) {
       {
         // Only touches objects in the app image, no need for mutator lock.
         TimingLogger::ScopedTiming timing("Fixup fields", &logger);
-        FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
+        FixupArtFieldVisitor field_visitor(forward_object);
         image_header.VisitPackedArtFields(&field_visitor, target_base);
       }
       {
         TimingLogger::ScopedTiming timing("Fixup imt", &logger);
-        image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+        image_header.VisitPackedImTables(forward_metadata, target_base, kPointerSize);
       }
       {
         TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
-        image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
+        image_header.VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
       }
       // In the app image case, the image methods are actually in the boot image.
       image_header.RelocateImageMethods(boot_image.Delta());
-      const auto& class_table_section = image_header.GetClassTableSection();
-      if (class_table_section.Size() > 0u) {
-        // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-        // This also relies on visit roots not doing any verification which could fail after we update
-        // the roots to be the image addresses.
+      // Fix up the intern table.
+      const auto& intern_table_section = image_header.GetInternedStringsSection();
+      if (intern_table_section.Size() > 0u) {
+        TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
         ScopedObjectAccess soa(Thread::Current());
-        WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-        ClassTable temp_table;
-        temp_table.ReadFromMemory(target_base + class_table_section.Offset());
-        FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
-        temp_table.VisitRoots(root_visitor);
+        // Fixup the pointers in the newly written intern table to contain image addresses.
+        InternTable temp_intern_table;
+        // Note that we require that ReadFromMemory does not make an internal copy of the elements
+        // so that the VisitRoots() will update the memory directly rather than the copies.
+        temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
+                                             [&](InternTable::UnorderedSet& strings)
+            REQUIRES_SHARED(Locks::mutator_lock_) {
+          for (GcRoot<mirror::String>& root : strings) {
+            root = GcRoot<mirror::String>(forward_object(root.Read<kWithoutReadBarrier>()));
+          }
+        }, /*is_boot_image=*/ false);
       }
     }
     if (VLOG_IS_ON(image)) {
@@ -1364,51 +1395,17 @@
     }
     return true;
   }
-
-  static std::unique_ptr<OatFile> OpenOatFile(const ImageSpace& image,
-                                              const char* image_path,
-                                              /*inout*/MemMap* oat_reservation,
-                                              std::string* error_msg) {
-    const ImageHeader& image_header = image.GetImageHeader();
-    std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);
-
-    CHECK(image_header.GetOatDataBegin() != nullptr);
-
-    std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
-                                                    oat_filename,
-                                                    oat_filename,
-                                                    image_header.GetOatDataBegin(),
-                                                    !Runtime::Current()->IsAotCompiler(),
-                                                    /* low_4gb */ false,
-                                                    /* abs_dex_location */ nullptr,
-                                                    oat_reservation,
-                                                    error_msg));
-    if (oat_file == nullptr) {
-      *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
-                                oat_filename.c_str(),
-                                image.GetName(),
-                                error_msg->c_str());
-      return nullptr;
-    }
-    uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
-    uint32_t image_oat_checksum = image_header.GetOatChecksum();
-    if (oat_checksum != image_oat_checksum) {
-      *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum 0x%x"
-                                " in image %s",
-                                oat_checksum,
-                                image_oat_checksum,
-                                image.GetName());
-      return nullptr;
-    }
-
-    return oat_file;
-  }
 };
 
 class ImageSpace::BootImageLoader {
  public:
-  BootImageLoader(const std::string& image_location, InstructionSet image_isa)
-      : image_location_(image_location),
+  BootImageLoader(const std::vector<std::string>& boot_class_path,
+                  const std::vector<std::string>& boot_class_path_locations,
+                  const std::string& image_location,
+                  InstructionSet image_isa)
+      : boot_class_path_(boot_class_path),
+        boot_class_path_locations_(boot_class_path_locations),
+        image_location_(image_location),
         image_isa_(image_isa),
         is_zygote_(Runtime::Current()->IsZygote()),
         has_system_(false),
@@ -1454,154 +1451,352 @@
                       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
                       /*out*/MemMap* extra_reservation,
                       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
-      return false;
-    }
-    uint32_t image_start;
-    uint32_t image_end;
-    uint32_t oat_end;
-    if (!GetBootImageAddressRange(filename, &image_start, &image_end, &oat_end, error_msg)) {
-      return false;
-    }
-    if (locations.size() > 1u) {
-      std::string last_filename = GetSystemImageFilename(locations.back().c_str(), image_isa_);
-      uint32_t dummy;
-      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
-        return false;
-      }
-    }
-    MemMap image_reservation;
-    MemMap oat_reservation;
-    MemMap local_extra_reservation;
-    if (!ReserveBootImageMemory(image_start,
-                                image_end,
-                                oat_end,
-                                extra_reservation_size,
-                                &image_reservation,
-                                &oat_reservation,
-                                &local_extra_reservation,
-                                error_msg)) {
+
+    if (!LoadFromFile(filename,
+                      /*validate_oat_file=*/ false,
+                      extra_reservation_size,
+                      &logger,
+                      boot_image_spaces,
+                      extra_reservation,
+                      error_msg)) {
       return false;
     }
 
-    std::vector<std::unique_ptr<ImageSpace>> spaces;
-    spaces.reserve(locations.size());
-    for (const std::string& location : locations) {
-      filename = GetSystemImageFilename(location.c_str(), image_isa_);
-      spaces.push_back(Load(location,
-                            filename,
-                            /* validate_oat_file */ false,
-                            &image_reservation,
-                            &oat_reservation,
-                            error_msg));
-      if (spaces.back() == nullptr) {
-        return false;
-      }
+    if (VLOG_IS_ON(image)) {
+      LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
+          << boot_image_spaces->front();
+      logger.Dump(LOG_STREAM(INFO));
     }
-    if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
-      return false;
-    }
-
-    *extra_reservation = std::move(local_extra_reservation);
-    boot_image_spaces->swap(spaces);
     return true;
   }
 
   bool LoadFromDalvikCache(
-      bool validate_system_checksums,
       bool validate_oat_file,
       size_t extra_reservation_size,
       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
       /*out*/MemMap* extra_reservation,
       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     DCHECK(DalvikCacheExists());
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
+
+    if (!LoadFromFile(cache_filename_,
+                      validate_oat_file,
+                      extra_reservation_size,
+                      &logger,
+                      boot_image_spaces,
+                      extra_reservation,
+                      error_msg)) {
       return false;
     }
-    uint32_t image_start;
-    uint32_t image_end;
-    uint32_t oat_end;
-    if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, &oat_end, error_msg)) {
+
+    if (VLOG_IS_ON(image)) {
+      LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
+          << boot_image_spaces->front();
+      logger.Dump(LOG_STREAM(INFO));
+    }
+    return true;
+  }
+
+ private:
+  bool LoadFromFile(
+      const std::string& filename,
+      bool validate_oat_file,
+      size_t extra_reservation_size,
+      TimingLogger* logger,
+      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/MemMap* extra_reservation,
+      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ImageHeader system_hdr;
+    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
+      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
       return false;
     }
-    if (locations.size() > 1u) {
-      std::string last_filename;
-      if (!GetDalvikCacheFilename(locations.back().c_str(),
-                                  dalvik_cache_.c_str(),
-                                  &last_filename,
-                                  error_msg)) {
-        return false;
-      }
-      uint32_t dummy;
-      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
-        return false;
-      }
+    if (system_hdr.GetComponentCount() == 0u ||
+        system_hdr.GetComponentCount() > boot_class_path_.size()) {
+      *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+                                    "expected non-zero and <= %zu",
+                                filename.c_str(),
+                                system_hdr.GetComponentCount(),
+                                boot_class_path_.size());
+      return false;
     }
     MemMap image_reservation;
-    MemMap oat_reservation;
     MemMap local_extra_reservation;
-    if (!ReserveBootImageMemory(image_start,
-                                image_end,
-                                oat_end,
+    if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
+                                reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
                                 extra_reservation_size,
                                 &image_reservation,
-                                &oat_reservation,
                                 &local_extra_reservation,
                                 error_msg)) {
       return false;
     }
 
+    ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
+                                                   system_hdr.GetComponentCount());
+    std::vector<std::string> locations =
+        ExpandMultiImageLocations(provided_locations, image_location_);
+    std::vector<std::string> filenames =
+        ExpandMultiImageLocations(provided_locations, filename);
+    DCHECK_EQ(locations.size(), filenames.size());
     std::vector<std::unique_ptr<ImageSpace>> spaces;
     spaces.reserve(locations.size());
-    for (const std::string& location : locations) {
-      std::string filename;
-      if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
+    for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
+      spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
+      const ImageSpace* space = spaces.back().get();
+      if (space == nullptr) {
         return false;
       }
-      spaces.push_back(Load(location,
-                            filename,
-                            validate_oat_file,
-                            &image_reservation,
-                            &oat_reservation,
-                            error_msg));
-      if (spaces.back() == nullptr) {
+      uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
+      uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
+      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
+          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
         return false;
       }
-      if (validate_system_checksums) {
-        ImageHeader system_hdr;
-        std::string system_filename = GetSystemImageFilename(location.c_str(), image_isa_);
-        if (!ReadSpecificImageHeader(system_filename.c_str(), &system_hdr)) {
-          *error_msg = StringPrintf("Cannot read header of %s", system_filename.c_str());
-          return false;
-        }
-        if (spaces.back()->GetImageHeader().GetOatChecksum() != system_hdr.GetOatChecksum()) {
-          *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
-                                    spaces.back()->GetImageHeader().GetOatChecksum(),
-                                    filename.c_str(),
-                                    system_hdr.GetOatChecksum(),
-                                    system_filename.c_str());
-          return false;
-        }
-      }
     }
-    if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
+      std::string expected_boot_class_path =
+          (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
+      if (!OpenOatFile(spaces[i].get(),
+                       boot_class_path_[i],
+                       expected_boot_class_path,
+                       validate_oat_file,
+                       logger,
+                       &image_reservation,
+                       error_msg)) {
+        return false;
+      }
+    }
+    if (!CheckReservationExhausted(image_reservation, error_msg)) {
       return false;
     }
 
-    *extra_reservation = std::move(local_extra_reservation);
+    MaybeRelocateSpaces(spaces, logger);
+    InitRuntimeMethods(spaces);
     boot_image_spaces->swap(spaces);
+    *extra_reservation = std::move(local_extra_reservation);
     return true;
   }
 
  private:
+  class RelocateVisitor {
+   public:
+    explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
+
+    template <typename T>
+    ALWAYS_INLINE T* operator()(T* src) const {
+      DCHECK(src != nullptr);
+      return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
+    }
+
+   private:
+    const uint32_t diff_;
+  };
+
+  template <PointerSize kPointerSize>
+  static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
+                               uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
+    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> patched_objects(
+        gc::accounting::ContinuousSpaceBitmap::Create(
+            "Marked objects",
+            spaces.front()->Begin(),
+            spaces.back()->End() - spaces.front()->Begin()));
+    using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor, RelocateVisitor>;
+    RelocateVisitor relocate_visitor(diff);
+    PatchRelocateVisitor patch_object_visitor(relocate_visitor, relocate_visitor);
+
+    mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
+    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
+      const ImageSpace* space = spaces[s].get();
+
+      // First patch the image header. The `diff` is OK for patching 32-bit fields but
+      // the 64-bit method fields in the ImageHeader may need a negative `delta`.
+      reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImage(
+          (reinterpret_cast32<uint32_t>(space->Begin()) < diff)
+              ? -static_cast<int64_t>(-diff) : static_cast<int64_t>(diff));
+
+      // Patch fields and methods.
+      const ImageHeader& image_header = space->GetImageHeader();
+      PatchArtFieldVisitor<PatchRelocateVisitor> field_visitor(patch_object_visitor);
+      image_header.VisitPackedArtFields(&field_visitor, space->Begin());
+      PatchArtMethodVisitor<kPointerSize, PatchRelocateVisitor, PatchRelocateVisitor>
+          method_visitor(patch_object_visitor, patch_object_visitor);
+      image_header.VisitPackedArtMethods(&method_visitor, space->Begin(), kPointerSize);
+      auto method_table_visitor = [&](ArtMethod* method) {
+        DCHECK(method != nullptr);
+        return relocate_visitor(method);
+      };
+      image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
+      image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
+
+      // Patch the intern table.
+      if (image_header.GetInternedStringsSection().Size() != 0u) {
+        const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
+        size_t read_count;
+        InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+        for (GcRoot<mirror::String>& slot : temp_set) {
+          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
+        }
+      }
+
+      // Patch the class table and classes, so that we can traverse class hierarchy to
+      // determine the types of other objects when we visit them later.
+      if (image_header.GetClassTableSection().Size() != 0u) {
+        uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
+        size_t read_count;
+        ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+        DCHECK(!temp_set.empty());
+        ClassTableVisitor class_table_visitor(relocate_visitor);
+        for (ClassTable::TableSlot& slot : temp_set) {
+          slot.VisitRoot(class_table_visitor);
+          mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+          DCHECK(klass != nullptr);
+          patched_objects->Set(klass);
+          patch_object_visitor.VisitClass(klass);
+          if (kIsDebugBuild) {
+            mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+            if (dcheck_class_class == nullptr) {
+              dcheck_class_class = class_class;
+            } else {
+              CHECK_EQ(class_class, dcheck_class_class);
+            }
+          }
+          // Then patch the non-embedded vtable and iftable.
+          mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+          if (vtable != nullptr && !patched_objects->Set(vtable)) {
+            patch_object_visitor.VisitPointerArray(vtable);
+          }
+          auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+          if (iftable != nullptr) {
+            int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+            for (int32_t i = 0; i != ifcount; ++i) {
+              mirror::PointerArray* unpatched_ifarray =
+                  iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+              if (unpatched_ifarray != nullptr) {
+                // The iftable has not been patched, so we need to explicitly adjust the pointer.
+                mirror::PointerArray* ifarray = relocate_visitor(unpatched_ifarray);
+                if (!patched_objects->Set(ifarray)) {
+                  patch_object_visitor.VisitPointerArray(ifarray);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+
+    // Patch class roots now, so that we can recognize mirror::Method and mirror::Constructor.
+    ObjPtr<mirror::Class> method_class;
+    ObjPtr<mirror::Class> constructor_class;
+    {
+      const ImageSpace* space = spaces.front().get();
+      const ImageHeader& image_header = space->GetImageHeader();
+
+      ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
+          image_header.GetImageRoots<kWithoutReadBarrier>();
+      patched_objects->Set(image_roots.Ptr());
+      patch_object_visitor.VisitObject(image_roots.Ptr());
+
+      ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
+          ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(MakeObjPtr(
+              image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots)));
+      patched_objects->Set(class_roots.Ptr());
+      patch_object_visitor.VisitObject(class_roots.Ptr());
+
+      method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
+      constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
+    }
+
+    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
+      const ImageSpace* space = spaces[s].get();
+      const ImageHeader& image_header = space->GetImageHeader();
+
+      static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
+      uint32_t objects_end = image_header.GetObjectsSection().Size();
+      DCHECK_ALIGNED(objects_end, kObjectAlignment);
+      for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
+        mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
+        if (!patched_objects->Test(object)) {
+          // This is the last pass over objects, so we do not need to Set().
+          patch_object_visitor.VisitObject(object);
+          mirror::Class* klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
+          if (klass->IsDexCacheClass<kVerifyNone>()) {
+            // Patch dex cache array pointers and elements.
+            mirror::DexCache* dex_cache = object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
+            patch_object_visitor.VisitDexCacheArrays(dex_cache);
+          } else if (klass == method_class || klass == constructor_class) {
+            // Patch the ArtMethod* in the mirror::Executable subobject.
+            ObjPtr<mirror::Executable> as_executable =
+                ObjPtr<mirror::Executable>::DownCast(MakeObjPtr(object));
+            ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
+            ArtMethod* patched_method = relocate_visitor(unpatched_method);
+            as_executable->SetArtMethod</*kTransactionActive=*/ false,
+                                        /*kCheckTransaction=*/ true,
+                                        kVerifyNone>(patched_method);
+          }
+        }
+        pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
+      }
+    }
+  }
+
+  static void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
+                                  TimingLogger* logger)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
+    ImageSpace* first_space = spaces.front().get();
+    const ImageHeader& first_space_header = first_space->GetImageHeader();
+    uint32_t diff =
+        static_cast<uint32_t>(first_space->Begin() - first_space_header.GetImageBegin());
+    if (!Runtime::Current()->ShouldRelocate()) {
+      DCHECK_EQ(diff, 0u);
+      return;
+    }
+
+    PointerSize pointer_size = first_space_header.GetPointerSize();
+    if (pointer_size == PointerSize::k64) {
+      DoRelocateSpaces<PointerSize::k64>(spaces, diff);
+    } else {
+      DoRelocateSpaces<PointerSize::k32>(spaces, diff);
+    }
+  }
+
+  static void InitRuntimeMethods(const std::vector<std::unique_ptr<ImageSpace>>& spaces)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    Runtime* runtime = Runtime::Current();
+    DCHECK(!runtime->HasResolutionMethod());
+    DCHECK(!spaces.empty());
+    ImageSpace* space = spaces[0].get();
+    const ImageHeader& image_header = space->GetImageHeader();
+    runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
+    runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
+    runtime->SetImtUnimplementedMethod(
+        image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
+        CalleeSaveType::kSaveAllCalleeSaves);
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveRefsOnlyMethod),
+        CalleeSaveType::kSaveRefsOnly);
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
+        CalleeSaveType::kSaveRefsAndArgs);
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveEverythingMethod),
+        CalleeSaveType::kSaveEverything);
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit),
+        CalleeSaveType::kSaveEverythingForClinit);
+    runtime->SetCalleeSaveMethod(
+        image_header.GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck),
+        CalleeSaveType::kSaveEverythingForSuspendCheck);
+  }
+
   std::unique_ptr<ImageSpace> Load(const std::string& image_location,
                                    const std::string& image_filename,
-                                   bool validate_oat_file,
+                                   TimingLogger* logger,
                                    /*inout*/MemMap* image_reservation,
-                                   /*inout*/MemMap* oat_reservation,
                                    /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Should this be a RDWR lock? This is only a defensive measure, as at
@@ -1616,8 +1811,8 @@
     // descriptor (and the associated exclusive lock) to be released when
     // we leave Create.
     ScopedFlock image = LockedFile::Open(image_filename.c_str(),
-                                         rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
-                                         true /* block */,
+                                         /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
+                                         /*block=*/ true,
                                          error_msg);
 
     VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
@@ -1630,81 +1825,117 @@
     // file name.
     return Loader::Init(image_filename.c_str(),
                         image_location.c_str(),
-                        validate_oat_file,
-                        /* oat_file */ nullptr,
+                        /*oat_file=*/ nullptr,
+                        logger,
                         image_reservation,
-                        oat_reservation,
                         error_msg);
   }
 
-  // Extract boot class path from oat file associated with `image_filename`
-  // and list all associated image locations.
-  static bool GetBootClassPathImageLocations(const std::string& image_location,
-                                             const std::string& image_filename,
-                                             /*out*/ std::vector<std::string>* all_locations,
-                                             /*out*/ std::string* error_msg) {
-    std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
-    std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
-                                                    oat_filename,
-                                                    oat_filename,
-                                                    /* requested_base */ nullptr,
-                                                    /* executable */ false,
-                                                    /* low_4gb */ false,
-                                                    /* abs_dex_location */ nullptr,
-                                                    /* reservation */ nullptr,
-                                                    error_msg));
-    if (oat_file == nullptr) {
-      *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
-                                oat_filename.c_str(),
-                                image_filename.c_str(),
-                                error_msg->c_str());
-      return false;
+  bool OpenOatFile(ImageSpace* space,
+                   const std::string& dex_filename,
+                   const std::string& expected_boot_class_path,
+                   bool validate_oat_file,
+                   TimingLogger* logger,
+                   /*inout*/MemMap* image_reservation,
+                   /*out*/std::string* error_msg) {
+    // VerifyImageAllocations() will be called later in Runtime::Init()
+    // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
+    // and ArtField::java_lang_reflect_ArtField_, which are used from
+    // Object::SizeOf() which VerifyImageAllocations() calls, are not
+    // set yet at this point.
+    DCHECK(image_reservation != nullptr);
+    std::unique_ptr<OatFile> oat_file;
+    {
+      TimingLogger::ScopedTiming timing("OpenOatFile", logger);
+      std::string oat_filename =
+          ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
+      std::string oat_location =
+          ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
+
+      oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
+                                   oat_filename,
+                                   oat_location,
+                                   !Runtime::Current()->IsAotCompiler(),
+                                   /*low_4gb=*/ false,
+                                   /*abs_dex_location=*/ dex_filename.c_str(),
+                                   image_reservation,
+                                   error_msg));
+      if (oat_file == nullptr) {
+        *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
+                                  oat_filename.c_str(),
+                                  space->GetName(),
+                                  error_msg->c_str());
+        return false;
+      }
+      const ImageHeader& image_header = space->GetImageHeader();
+      uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
+      uint32_t image_oat_checksum = image_header.GetOatChecksum();
+      if (oat_checksum != image_oat_checksum) {
+        *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum"
+                                  " 0x%x in image %s",
+                                  oat_checksum,
+                                  image_oat_checksum,
+                                  space->GetName());
+        return false;
+      }
+      const char* oat_boot_class_path =
+          oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
+      oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
+      if (expected_boot_class_path != oat_boot_class_path) {
+        *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
+                                  "boot class path %s in image %s",
+                                  oat_boot_class_path,
+                                  expected_boot_class_path.c_str(),
+                                  space->GetName());
+        return false;
+      }
+      ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
+      CHECK(image_header.GetOatDataBegin() != nullptr);
+      uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
+      if (oat_file->Begin() != oat_data_begin) {
+        *error_msg = StringPrintf("Oat file '%s' referenced from image %s has unexpected begin"
+                                      " %p v. %p",
+                                  oat_filename.c_str(),
+                                  space->GetName(),
+                                  oat_file->Begin(),
+                                  oat_data_begin);
+        return false;
+      }
     }
-    const OatHeader& oat_header = oat_file->GetOatHeader();
-    const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
-    all_locations->push_back(image_location);
-    if (boot_classpath != nullptr && boot_classpath[0] != 0) {
-      ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
+    if (validate_oat_file) {
+      TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
+      if (!ImageSpace::ValidateOatFile(*oat_file, error_msg)) {
+        DCHECK(!error_msg->empty());
+        return false;
+      }
     }
+    space->oat_file_ = std::move(oat_file);
+    space->oat_file_non_owned_ = space->oat_file_.get();
     return true;
   }
 
-  bool GetBootImageAddressRange(const std::string& filename,
-                                /*out*/uint32_t* start,
-                                /*out*/uint32_t* end,
-                                /*out*/uint32_t* oat_end,
-                                /*out*/std::string* error_msg) {
-    ImageHeader system_hdr;
-    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
-      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
-      return false;
-    }
-    *start = reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin());
-    CHECK_ALIGNED(*start, kPageSize);
-    *end = RoundUp(*start + system_hdr.GetImageSize(), kPageSize);
-    *oat_end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
-    return true;
-  }
-
-  bool ReserveBootImageMemory(uint32_t image_start,
-                              uint32_t image_end,
-                              uint32_t oat_end,
+  bool ReserveBootImageMemory(uint32_t reservation_size,
+                              uint32_t image_start,
                               size_t extra_reservation_size,
                               /*out*/MemMap* image_reservation,
-                              /*out*/MemMap* oat_reservation,
                               /*out*/MemMap* extra_reservation,
                               /*out*/std::string* error_msg) {
+    DCHECK_ALIGNED(reservation_size, kPageSize);
+    DCHECK_ALIGNED(image_start, kPageSize);
     DCHECK(!image_reservation->IsValid());
-    size_t total_size =
-        dchecked_integral_cast<size_t>(oat_end - image_start) + extra_reservation_size;
+    DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
+    size_t total_size = reservation_size + extra_reservation_size;
+    bool relocate = Runtime::Current()->ShouldRelocate();
+    // If relocating, choose a random address for ALSR.
+    uint32_t addr = relocate ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
     *image_reservation =
         MemMap::MapAnonymous("Boot image reservation",
-                             reinterpret_cast32<uint8_t*>(image_start),
+                             reinterpret_cast32<uint8_t*>(addr),
                              total_size,
                              PROT_NONE,
-                             /* low_4gb */ true,
-                             /* reuse */ false,
-                             /* reservation */ nullptr,
+                             /*low_4gb=*/ true,
+                             /*reuse=*/ false,
+                             /*reservation=*/ nullptr,
                              error_msg);
     if (!image_reservation->IsValid()) {
       return false;
@@ -1722,36 +1953,22 @@
         return false;
       }
     }
-    DCHECK(!oat_reservation->IsValid());
-    *oat_reservation = image_reservation->RemapAtEnd(reinterpret_cast32<uint8_t*>(image_end),
-                                                     "Boot image oat reservation",
-                                                     PROT_NONE,
-                                                     error_msg);
-    if (!oat_reservation->IsValid()) {
-      return false;
-    }
 
     return true;
   }
 
-  bool CheckReservationsExhausted(const MemMap& image_reservation,
-                                  const MemMap& oat_reservation,
-                                  /*out*/std::string* error_msg) {
+  bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
     if (image_reservation.IsValid()) {
       *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
                                 image_reservation.Begin(),
                                 image_reservation.End());
       return false;
     }
-    if (oat_reservation.IsValid()) {
-      *error_msg = StringPrintf("Excessive oat reservation after loading boot image: %p-%p",
-                                image_reservation.Begin(),
-                                image_reservation.End());
-      return false;
-    }
     return true;
   }
 
+  const std::vector<std::string>& boot_class_path_;
+  const std::vector<std::string>& boot_class_path_locations_;
   const std::string& image_location_;
   InstructionSet image_isa_;
   bool is_zygote_;
@@ -1799,6 +2016,8 @@
 }
 
 bool ImageSpace::LoadBootImage(
+    const std::vector<std::string>& boot_class_path,
+    const std::vector<std::string>& boot_class_path_locations,
     const std::string& image_location,
     const InstructionSet image_isa,
     size_t extra_reservation_size,
@@ -1816,7 +2035,7 @@
     return false;
   }
 
-  BootImageLoader loader(image_location, image_isa);
+  BootImageLoader loader(boot_class_path, boot_class_path_locations, image_location, image_isa);
 
   // Step 0: Extra zygote work.
 
@@ -1840,11 +2059,8 @@
     const std::string& dalvik_cache = loader.GetDalvikCache();
     DCHECK(!dalvik_cache.empty());
     std::string local_error_msg;
-    // All secondary images are verified when the primary image is verified.
-    bool verified =
-        VerifyImage(image_location.c_str(), dalvik_cache.c_str(), image_isa, &local_error_msg);
     bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
-    if (!verified || !check_space) {
+    if (!check_space) {
       LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
       PruneDalvikCache(image_isa);
 
@@ -1860,27 +2076,9 @@
   // Collect all the errors.
   std::vector<std::string> error_msgs;
 
-  // Step 1: Check if we have an existing image in the dalvik cache.
-  if (loader.HasCache()) {
-    std::string local_error_msg;
-    // If we have system image, validate system image checksums, otherwise validate the oat file.
-    if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
-                                   /* validate_oat_file */ !loader.HasSystem(),
-                                   extra_reservation_size,
-                                   boot_image_spaces,
-                                   extra_reservation,
-                                   &local_error_msg)) {
-      return true;
-    }
-    error_msgs.push_back(local_error_msg);
-  }
+  // Step 1: Check if we have an existing image in /system.
 
-  // Step 2: We have an existing image in /system.
-
-  // Step 2.a: We are not required to relocate it. Then we can use it directly.
-  bool relocate = Runtime::Current()->ShouldRelocate();
-
-  if (loader.HasSystem() && !relocate) {
+  if (loader.HasSystem()) {
     std::string local_error_msg;
     if (loader.LoadFromSystem(extra_reservation_size,
                               boot_image_spaces,
@@ -1891,29 +2089,17 @@
     error_msgs.push_back(local_error_msg);
   }
 
-  // Step 2.b: We require a relocated image. Then we must patch it.
-  if (loader.HasSystem() && relocate) {
+  // Step 2: Check if we have an existing image in the dalvik cache.
+  if (loader.HasCache()) {
     std::string local_error_msg;
-    if (!dex2oat_enabled) {
-      local_error_msg = "Patching disabled.";
-    } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
-      bool patch_success = RelocateImage(
-          image_location.c_str(), loader.GetDalvikCache().c_str(), image_isa, &local_error_msg);
-      if (patch_success) {
-        if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
-                                       /* validate_oat_file */ false,
-                                       extra_reservation_size,
-                                       boot_image_spaces,
-                                       extra_reservation,
-                                       &local_error_msg)) {
-          return true;
-        }
-      }
+    if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ true,
+                                   extra_reservation_size,
+                                   boot_image_spaces,
+                                   extra_reservation,
+                                   &local_error_msg)) {
+      return true;
     }
-    error_msgs.push_back(StringPrintf("Cannot relocate image %s to %s: %s",
-                                      image_location.c_str(),
-                                      loader.GetCacheFilename().c_str(),
-                                      local_error_msg.c_str()));
+    error_msgs.push_back(local_error_msg);
   }
 
   // Step 3: We do not have an existing image in /system,
@@ -1926,8 +2112,7 @@
       bool compilation_success =
           GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
       if (compilation_success) {
-        if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
-                                       /* validate_oat_file */ false,
+        if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
                                        extra_reservation_size,
                                        boot_image_spaces,
                                        extra_reservation,
@@ -1986,13 +2171,12 @@
 std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
                                                            const OatFile* oat_file,
                                                            std::string* error_msg) {
-  return Loader::Init(image,
-                      image,
-                      /* validate_oat_file */ false,
-                      oat_file,
-                      /* image_reservation */ nullptr,
-                      /* oat_reservation */ nullptr,
-                      error_msg);
+  // Note: The oat file has already been validated.
+  return Loader::InitAppImage(image,
+                              image,
+                              oat_file,
+                              /*image_reservation=*/ nullptr,
+                              error_msg);
 }
 
 const OatFile* ImageSpace::GetOatFile() const {
@@ -2012,51 +2196,6 @@
       << ",name=\"" << GetName() << "\"]";
 }
 
-std::string ImageSpace::GetMultiImageBootClassPath(
-    const std::vector<const char*>& dex_locations,
-    const std::vector<const char*>& oat_filenames,
-    const std::vector<const char*>& image_filenames) {
-  DCHECK_GT(oat_filenames.size(), 1u);
-  // If the image filename was adapted (e.g., for our tests), we need to change this here,
-  // too, but need to strip all path components (they will be re-established when loading).
-  std::ostringstream bootcp_oss;
-  bool first_bootcp = true;
-  for (size_t i = 0; i < dex_locations.size(); ++i) {
-    if (!first_bootcp) {
-      bootcp_oss << ":";
-    }
-
-    std::string dex_loc = dex_locations[i];
-    std::string image_filename = image_filenames[i];
-
-    // Use the dex_loc path, but the image_filename name (without path elements).
-    size_t dex_last_slash = dex_loc.rfind('/');
-
-    // npos is max(size_t). That makes this a bit ugly.
-    size_t image_last_slash = image_filename.rfind('/');
-    size_t image_last_at = image_filename.rfind('@');
-    size_t image_last_sep = (image_last_slash == std::string::npos)
-                                ? image_last_at
-                                : (image_last_at == std::string::npos)
-                                      ? std::string::npos
-                                      : std::max(image_last_slash, image_last_at);
-    // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
-
-    if (dex_last_slash == std::string::npos) {
-      dex_loc = image_filename.substr(image_last_sep + 1);
-    } else {
-      dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
-          image_filename.substr(image_last_sep + 1);
-    }
-
-    // Image filenames already end with .art, no need to replace.
-
-    bootcp_oss << dex_loc;
-    first_bootcp = false;
-  }
-  return bootcp_oss.str();
-}
-
 bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
   const ArtDexFileLoader dex_file_loader;
   for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
@@ -2117,46 +2256,159 @@
   return true;
 }
 
-void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                            const std::string& boot_classpath,
-                                            std::vector<std::string>* image_file_names) {
-  DCHECK(image_file_names != nullptr);
-
-  std::vector<std::string> images;
-  Split(boot_classpath, ':', &images);
-
-  // Add the rest into the list. We have to adjust locations, possibly:
-  //
-  // For example, image_file_name is /a/b/c/d/e.art
-  //              images[0] is          f/c/d/e.art
-  // ----------------------------------------------
-  //              images[1] is          g/h/i/j.art  -> /a/b/h/i/j.art
-  const std::string& first_image = images[0];
-  // Length of common suffix.
-  size_t common = 0;
-  while (common < input_image_file_name.size() &&
-         common < first_image.size() &&
-         *(input_image_file_name.end() - common - 1) == *(first_image.end() - common - 1)) {
-    ++common;
+std::string ImageSpace::GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+                                                  const std::string& image_location,
+                                                  InstructionSet image_isa,
+                                                  /*out*/std::string* error_msg) {
+  std::string system_filename;
+  bool has_system = false;
+  std::string cache_filename;
+  bool has_cache = false;
+  bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
+  if (!FindImageFilename(image_location.c_str(),
+                         image_isa,
+                         &system_filename,
+                         &has_system,
+                         &cache_filename,
+                         &dalvik_cache_exists,
+                         &has_cache,
+                         &is_global_cache)) {
+    *error_msg = StringPrintf("Unable to find image file for %s and %s",
+                              image_location.c_str(),
+                              GetInstructionSetString(image_isa));
+    return std::string();
   }
-  // We want to replace the prefix of the input image with the prefix of the boot class path.
-  // This handles the case where the image file contains @ separators.
-  // Example image_file_name is oats/system@framework@boot.art
-  // images[0] is .../arm/boot.art
-  // means that the image name prefix will be oats/system@framework@
-  // so that the other images are openable.
-  const size_t old_prefix_length = first_image.size() - common;
-  const std::string new_prefix = input_image_file_name.substr(
-      0,
-      input_image_file_name.size() - common);
 
-  // Apply pattern to images[1] .. images[n].
-  for (size_t i = 1; i < images.size(); ++i) {
-    const std::string& image = images[i];
-    CHECK_GT(image.length(), old_prefix_length);
-    std::string suffix = image.substr(old_prefix_length);
-    image_file_names->push_back(new_prefix + suffix);
+  DCHECK(has_system || has_cache);
+  const std::string& filename = has_system ? system_filename : cache_filename;
+  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
+  if (header == nullptr) {
+    return std::string();
   }
+  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
+    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+                                  "expected non-zero and <= %zu",
+                              filename.c_str(),
+                              header->GetComponentCount(),
+                              boot_class_path.size());
+    return std::string();
+  }
+
+  std::string boot_image_checksum =
+      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
+  ArrayRef<const std::string> boot_class_path_tail =
+      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
+  for (const std::string& bcp_filename : boot_class_path_tail) {
+    std::vector<std::unique_ptr<const DexFile>> dex_files;
+    const ArtDexFileLoader dex_file_loader;
+    if (!dex_file_loader.Open(bcp_filename.c_str(),
+                              bcp_filename,  // The location does not matter here.
+                              /*verify=*/ false,
+                              /*verify_checksum=*/ false,
+                              error_msg,
+                              &dex_files)) {
+      return std::string();
+    }
+    DCHECK(!dex_files.empty());
+    StringAppendF(&boot_image_checksum, ":d");
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+    }
+  }
+  return boot_image_checksum;
+}
+
+std::string ImageSpace::GetBootClassPathChecksums(
+    const std::vector<ImageSpace*>& image_spaces,
+    const std::vector<const DexFile*>& boot_class_path) {
+  DCHECK(!image_spaces.empty());
+  const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
+  uint32_t component_count = primary_header.GetComponentCount();
+  DCHECK_EQ(component_count, image_spaces.size());
+  std::string boot_image_checksum =
+      StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
+  size_t pos = 0u;
+  for (const ImageSpace* space : image_spaces) {
+    size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
+    if (kIsDebugBuild) {
+      CHECK_NE(num_dex_files, 0u);
+      CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
+      for (size_t i = 0; i != num_dex_files; ++i) {
+        CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
+                 boot_class_path[pos + i]->GetLocation());
+      }
+    }
+    pos += num_dex_files;
+  }
+  ArrayRef<const DexFile* const> boot_class_path_tail =
+      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
+  DCHECK(boot_class_path_tail.empty() ||
+         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
+  for (const DexFile* dex_file : boot_class_path_tail) {
+    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+      StringAppendF(&boot_image_checksum, ":d");
+    }
+    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+  }
+  return boot_image_checksum;
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+    const std::vector<std::string>& dex_locations,
+    const std::string& image_location) {
+  return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+    ArrayRef<const std::string> dex_locations,
+    const std::string& image_location) {
+  DCHECK(!dex_locations.empty());
+
+  // Find the path.
+  size_t last_slash = image_location.rfind('/');
+  CHECK_NE(last_slash, std::string::npos);
+
+  // We also need to honor path components that were encoded through '@'. Otherwise the loading
+  // code won't be able to find the images.
+  if (image_location.find('@', last_slash) != std::string::npos) {
+    last_slash = image_location.rfind('@');
+  }
+
+  // Find the dot separating the primary image name from the extension.
+  size_t last_dot = image_location.rfind('.');
+  // Extract the extension and base (the path and primary image name).
+  std::string extension;
+  std::string base = image_location;
+  if (last_dot != std::string::npos && last_dot > last_slash) {
+    extension = image_location.substr(last_dot);  // Including the dot.
+    base.resize(last_dot);
+  }
+  // For non-empty primary image name, add '-' to the `base`.
+  if (last_slash + 1u != base.size()) {
+    base += '-';
+  }
+
+  std::vector<std::string> locations;
+  locations.reserve(dex_locations.size());
+  locations.push_back(image_location);
+
+  // Now create the other names. Use a counted loop to skip the first one.
+  for (size_t i = 1u; i < dex_locations.size(); ++i) {
+    // Replace path with `base` (i.e. image path and prefix) and replace the original
+    // extension (if any) with `extension`.
+    std::string name = dex_locations[i];
+    size_t last_dex_slash = name.rfind('/');
+    if (last_dex_slash != std::string::npos) {
+      name = name.substr(last_dex_slash + 1);
+    }
+    size_t last_dex_dot = name.rfind('.');
+    if (last_dex_dot != std::string::npos) {
+      name.resize(last_dex_dot);
+    }
+    locations.push_back(base + name + extension);
+  }
+  return locations;
 }
 
 void ImageSpace::DumpSections(std::ostream& os) const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index a2490ac..bb19097 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -17,13 +17,15 @@
 #ifndef ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
 #define ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
 
-#include "arch/instruction_set.h"
 #include "gc/accounting/space_bitmap.h"
 #include "image.h"
 #include "space.h"
 
 namespace art {
 
+template <typename T> class ArrayRef;
+class DexFile;
+enum class InstructionSet;
 class OatFile;
 
 namespace gc {
@@ -39,9 +41,11 @@
   // Load boot image spaces from a primary image file for a specified instruction set.
   //
   // On successful return, the loaded spaces are added to boot_image_spaces (which must be
-  // empty on entry) and oat_file_end is updated with the (page-aligned) end of the last
-  // oat file.
+  // empty on entry) and `extra_reservation` is set to the requested reservation located
+  // after the end of the last loaded oat file.
   static bool LoadBootImage(
+      const std::vector<std::string>& boot_class_path,
+      const std::vector<std::string>& boot_class_path_locations,
       const std::string& image_location,
       const InstructionSet image_isa,
       size_t extra_reservation_size,
@@ -57,9 +61,9 @@
   // Reads the image header from the specified image location for the
   // instruction set image_isa. Returns null on failure, with
   // reason in error_msg.
-  static ImageHeader* ReadImageHeader(const char* image_location,
-                                      InstructionSet image_isa,
-                                      std::string* error_msg);
+  static std::unique_ptr<ImageHeader> ReadImageHeader(const char* image_location,
+                                                      InstructionSet image_isa,
+                                                      std::string* error_msg);
 
   // Give access to the OatFile.
   const OatFile* GetOatFile() const;
@@ -122,15 +126,23 @@
                                 bool* has_data,
                                 bool *is_global_cache);
 
-  // Use the input image filename to adapt the names in the given boot classpath to establish
-  // complete locations for secondary images.
-  static void ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                        const std::string& boot_classpath,
-                                        std::vector<std::string>* image_filenames);
+  // Returns the checksums for the boot image and extra boot class path dex files,
+  // based on the boot class path, image location and ISA (may differ from the ISA of an
+  // initialized Runtime). The boot image and dex files do not need to be loaded in memory.
+  static std::string GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+                                               const std::string& image_location,
+                                               InstructionSet image_isa,
+                                               /*out*/std::string* error_msg);
 
-  static std::string GetMultiImageBootClassPath(const std::vector<const char*>& dex_locations,
-                                                const std::vector<const char*>& oat_filenames,
-                                                const std::vector<const char*>& image_filenames);
+  // Returns the checksums for the boot image and extra boot class path dex files,
+  // based on the boot image and boot class path dex files loaded in memory.
+  static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
+                                               const std::vector<const DexFile*>& boot_class_path);
+
+  // Expand a single image location to multi-image locations based on the dex locations.
+  static std::vector<std::string> ExpandMultiImageLocations(
+      const std::vector<std::string>& dex_locations,
+      const std::string& image_location);
 
   // Returns true if the dex checksums in the given oat file match the
   // checksums of the original dex files on disk. This is intended to be used
@@ -148,16 +160,6 @@
     return Begin() + GetImageHeader().GetImageSize();
   }
 
-  // Return the start of the associated oat file.
-  uint8_t* GetOatFileBegin() const {
-    return GetImageHeader().GetOatFileBegin();
-  }
-
-  // Return the end of the associated oat file.
-  uint8_t* GetOatFileEnd() const {
-    return GetImageHeader().GetOatFileEnd();
-  }
-
   void DumpSections(std::ostream& os) const;
 
   // De-initialize the image-space by undoing the effects in Init().
@@ -201,8 +203,21 @@
   friend class Space;
 
  private:
-  class Loader;
+  // Internal overload that takes ArrayRef<> instead of vector<>.
+  static std::vector<std::string> ExpandMultiImageLocations(
+      ArrayRef<const std::string> dex_locations,
+      const std::string& image_location);
+
   class BootImageLoader;
+  template <typename ReferenceVisitor>
+  class ClassTableVisitor;
+  class Loader;
+  template <typename PatchObjectVisitor>
+  class PatchArtFieldVisitor;
+  template <PointerSize kPointerSize, typename PatchObjectVisitor, typename PatchCodeVisitor>
+  class PatchArtMethodVisitor;
+  template <PointerSize kPointerSize, typename HeapVisitor, typename NativeVisitor>
+  class PatchObjectVisitor;
 
   DISALLOW_COPY_AND_ASSIGN(ImageSpace);
 };
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index 14deb6f..262c6e0 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -23,13 +23,13 @@
 #include "android-base/stringprintf.h"
 
 #include "base/file_utils.h"
-#include "base/globals.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "runtime.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index 299a413..0a35bce 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -43,14 +43,13 @@
   args.push_back("--oat-file=" + oat_location);
   ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
 
-  std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
                                              oat_location.c_str(),
                                              oat_location.c_str(),
-                                             /* requested_base */ nullptr,
-                                             /* executable */ false,
-                                             /* low_4gb */ false,
-                                             /* abs_dex_location */ nullptr,
-                                             /* reservation */ nullptr,
+                                             /*executable=*/ false,
+                                             /*low_4gb=*/ false,
+                                             /*abs_dex_location=*/ nullptr,
+                                             /*reservation=*/ nullptr,
                                              &error_msg));
   ASSERT_TRUE(oat != nullptr) << error_msg;
 
@@ -110,7 +109,7 @@
   EXPECT_FALSE(ImageSpace::ValidateOatFile(*oat, &error_msg));
 }
 
-template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
+template <bool kImage, bool kRelocate, bool kImageDex2oat>
 class ImageSpaceLoadingTest : public CommonRuntimeTest {
  protected:
   void SetUpRuntimeOptions(RuntimeOptions* options) override {
@@ -119,9 +118,6 @@
                             nullptr);
     }
     options->emplace_back(kRelocate ? "-Xrelocate" : "-Xnorelocate", nullptr);
-    if (!kPatchoat) {
-      options->emplace_back("-Xpatchoat:false", nullptr);
-    }
     options->emplace_back(kImageDex2oat ? "-Ximage-dex2oat" : "-Xnoimage-dex2oat", nullptr);
 
     // We want to test the relocation behavior of ImageSpace. As such, don't pretend we're a
@@ -130,27 +126,22 @@
   }
 };
 
-using ImageSpacePatchoatTest = ImageSpaceLoadingTest<true, true, true, true>;
-TEST_F(ImageSpacePatchoatTest, Test) {
-  EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
-}
-
-using ImageSpaceDex2oatTest = ImageSpaceLoadingTest<false, true, false, true>;
+using ImageSpaceDex2oatTest = ImageSpaceLoadingTest<false, true, true>;
 TEST_F(ImageSpaceDex2oatTest, Test) {
   EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
 }
 
-using ImageSpaceNoDex2oatNoPatchoatTest = ImageSpaceLoadingTest<true, true, false, false>;
-TEST_F(ImageSpaceNoDex2oatNoPatchoatTest, Test) {
-  EXPECT_TRUE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
-}
-
-using ImageSpaceNoRelocateNoDex2oatNoPatchoatTest = ImageSpaceLoadingTest<true, false, false, false>;
-TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
+using ImageSpaceNoDex2oatTest = ImageSpaceLoadingTest<true, true, false>;
+TEST_F(ImageSpaceNoDex2oatTest, Test) {
   EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
 }
 
-class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
+using ImageSpaceNoRelocateNoDex2oatTest = ImageSpaceLoadingTest<true, false, false>;
+TEST_F(ImageSpaceNoRelocateNoDex2oatTest, Test) {
+  EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
+}
+
+class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, true> {
  protected:
   void SetUpRuntimeOptions(RuntimeOptions* options) override {
     const char* android_data = getenv("ANDROID_DATA");
@@ -169,7 +160,7 @@
     CHECK_NE(fd, -1) << strerror(errno);
     result = close(fd);
     CHECK_EQ(result, 0) << strerror(errno);
-    ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
+    ImageSpaceLoadingTest<false, true, true>::SetUpRuntimeOptions(options);
   }
 
   void TearDown() override {
@@ -179,7 +170,7 @@
     CHECK_EQ(result, 0) << strerror(errno);
     result = setenv("ANDROID_DATA", old_android_data_.c_str(), /* replace */ 1);
     CHECK_EQ(result, 0) << strerror(errno);
-    ImageSpaceLoadingTest<false, true, false, true>::TearDown();
+    ImageSpaceLoadingTest<false, true, true>::TearDown();
   }
 
  private:
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 09d0251..1658dba 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -108,8 +108,10 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
+                                   const char* lock_name)
     : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
+      lock_(lock_name, kAllocSpaceLock),
       num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
       total_objects_allocated_(0), begin_(begin), end_(end) {
 }
@@ -120,8 +122,7 @@
 }
 
 LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
-    : LargeObjectSpace(name, nullptr, nullptr),
-      lock_("large object map space lock", kAllocSpaceLock) {}
+    : LargeObjectSpace(name, nullptr, nullptr, "large object map space lock") {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
@@ -136,10 +137,9 @@
                                            size_t* bytes_tl_bulk_allocated) {
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
-                                        /* addr */ nullptr,
                                         num_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -345,14 +345,13 @@
   return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
 }
 
-FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -362,9 +361,8 @@
                              MemMap&& mem_map,
                              uint8_t* begin,
                              uint8_t* end)
-    : LargeObjectSpace(name, begin, end),
-      mem_map_(std::move(mem_map)),
-      lock_("free list space lock", kAllocSpaceLock) {
+    : LargeObjectSpace(name, begin, end, "free list space lock"),
+      mem_map_(std::move(mem_map)) {
   const size_t space_capacity = end - begin;
   free_end_ = space_capacity;
   CHECK_ALIGNED(space_capacity, kAlignment);
@@ -372,10 +370,9 @@
   std::string error_msg;
   allocation_info_map_ =
       MemMap::MapAnonymous("large object free list space allocation info map",
-                           /* addr */ nullptr,
                            alloc_info_size,
                            PROT_READ | PROT_WRITE,
-                           /* low_4gb */ false,
+                           /*low_4gb=*/ false,
                            &error_msg);
   CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 26c6463..a4d6a24 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -22,6 +22,7 @@
 #include "base/tracking_safe_map.h"
 #include "dlmalloc_space.h"
 #include "space.h"
+#include "thread-current-inl.h"
 
 #include <set>
 #include <vector>
@@ -50,15 +51,19 @@
   virtual ~LargeObjectSpace() {}
 
   uint64_t GetBytesAllocated() override {
+    MutexLock mu(Thread::Current(), lock_);
     return num_bytes_allocated_;
   }
   uint64_t GetObjectsAllocated() override {
+    MutexLock mu(Thread::Current(), lock_);
     return num_objects_allocated_;
   }
   uint64_t GetTotalBytesAllocated() const {
+    MutexLock mu(Thread::Current(), lock_);
     return total_bytes_allocated_;
   }
   uint64_t GetTotalObjectsAllocated() const {
+    MutexLock mu(Thread::Current(), lock_);
     return total_objects_allocated_;
   }
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
@@ -110,14 +115,26 @@
   virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
 
  protected:
-  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
+  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
+                            const char* lock_name);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
-  // Approximate number of bytes which have been allocated into the space.
-  uint64_t num_bytes_allocated_;
-  uint64_t num_objects_allocated_;
-  uint64_t total_bytes_allocated_;
-  uint64_t total_objects_allocated_;
+  // Used to ensure mutual exclusion when the allocation spaces data structures,
+  // including the allocation counters below, are being modified.
+  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+  // Number of bytes which have been allocated into the space and not yet freed. The count is also
+  // included in the identically named field in Heap. Counts actual allocated (after rounding),
+  // not requested, sizes. TODO: It would be cheaper to just maintain total allocated and total
+  // free counts.
+  uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
+  uint64_t num_objects_allocated_ GUARDED_BY(lock_);
+
+  // Totals for large objects ever allocated, including those that have since been deallocated.
+  // Never decremented.
+  uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
+  uint64_t total_objects_allocated_ GUARDED_BY(lock_);
+
   // Begin and end, may change as more large objects are allocated.
   uint8_t* begin_;
   uint8_t* end_;
@@ -157,8 +174,6 @@
   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
   void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
 
-  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
-  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
       GUARDED_BY(lock_);
 };
@@ -169,7 +184,7 @@
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
-  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
+  static FreeListSpace* Create(const std::string& name, size_t capacity);
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -215,7 +230,6 @@
   MemMap allocation_info_map_;
   AllocationInfo* allocation_info_;
 
-  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // Free bytes at the end of the space.
   size_t free_end_ GUARDED_BY(lock_);
   FreeBlocks free_blocks_ GUARDED_BY(lock_);
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 9baa016..62bc26e 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -42,7 +42,7 @@
     if (i == 0) {
       los = space::LargeObjectMapSpace::Create("large object space");
     } else {
-      los = space::FreeListSpace::Create("large object space", nullptr, capacity);
+      los = space::FreeListSpace::Create("large object space", capacity);
     }
 
     // Make sure the bitmap is not empty and actually covers at least how much we expect.
@@ -128,7 +128,7 @@
   AllocRaceTask(size_t id, size_t iterations, size_t size, LargeObjectSpace* los) :
     id_(id), iterations_(iterations), size_(size), los_(los) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     for (size_t i = 0; i < iterations_ ; ++i) {
       size_t alloc_size, bytes_tl_bulk_allocated;
       mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr,
@@ -140,7 +140,7 @@
     }
   }
 
-  virtual void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -157,7 +157,7 @@
     if (los_type == 0) {
       los = space::LargeObjectMapSpace::Create("large object space");
     } else {
-      los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+      los = space::FreeListSpace::Create("large object space", 128 * MB);
     }
 
     Thread* self = Thread::Current();
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 445560a..474231b 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -16,9 +16,12 @@
 
 #include "malloc_space.h"
 
+#include <ostream>
+
 #include "android-base/stringprintf.h"
 
 #include "base/logging.h"  // For VLOG
+#include "base/mutex-inl.h"
 #include "base/utils.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
@@ -81,8 +84,7 @@
                                  size_t starting_size,
                                  size_t* initial_size,
                                  size_t* growth_limit,
-                                 size_t* capacity,
-                                 uint8_t* requested_begin) {
+                                 size_t* capacity) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -106,10 +108,9 @@
 
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         *capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6bf2d71..9a90dfd 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -19,8 +19,10 @@
 
 #include "space.h"
 
-#include <ostream>
+#include <iosfwd>
+
 #include "base/memory_tool.h"
+#include "base/mutex.h"
 
 namespace art {
 namespace gc {
@@ -157,8 +159,7 @@
                              size_t starting_size,
                              size_t* initial_size,
                              size_t* growth_limit,
-                             size_t* capacity,
-                             uint8_t* requested_begin);
+                             size_t* capacity);
 
   // When true the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e048515..86a0a6e 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -18,6 +18,10 @@
 #define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
 
 #include "region_space.h"
+
+#include "base/mutex-inl.h"
+#include "mirror/object-inl.h"
+#include "region_space.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -60,7 +64,8 @@
       return obj;
     }
     MutexLock mu(Thread::Current(), region_lock_);
-    // Retry with current region since another thread may have updated it.
+    // Retry with current region since another thread may have updated
+    // current_region_ or evac_region_.  TODO: fix race.
     obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                              bytes_allocated,
                                                              usable_size,
@@ -188,6 +193,40 @@
   return bytes;
 }
 
+template <typename Visitor>
+inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+                                             Visitor&& visitor) {
+  const size_t iter_limit = kUseTableLookupReadBarrier
+      ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
+  // Instead of region-wise scan, find contiguous blocks of un-evac regions and then
+  // visit them. Everything before visit_block_begin has been processed, while
+  // [visit_block_begin, visit_block_end) still needs to be visited.
+  uint8_t* visit_block_begin = nullptr;
+  uint8_t* visit_block_end = nullptr;
+  for (size_t i = 0; i < iter_limit; ++i) {
+    Region* r = &regions_[i];
+    if (r->IsInUnevacFromSpace()) {
+      // visit_block_begin set to nullptr means a new visit block needs to be started.
+      if (visit_block_begin == nullptr) {
+        visit_block_begin = r->Begin();
+      }
+      visit_block_end = r->End();
+    } else if (visit_block_begin != nullptr) {
+      // Visit the block range as r is not adjacent to current visit block.
+      bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+                               reinterpret_cast<uintptr_t>(visit_block_end),
+                               visitor);
+      visit_block_begin = nullptr;
+    }
+  }
+  // Visit last block, if not processed yet.
+  if (visit_block_begin != nullptr) {
+    bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+                             reinterpret_cast<uintptr_t>(visit_block_end),
+                             visitor);
+  }
+}
+
 template<bool kToSpaceOnly, typename Visitor>
 inline void RegionSpace::WalkInternal(Visitor&& visitor) {
   // TODO: MutexLock on region_lock_ won't work due to lock order
@@ -200,41 +239,71 @@
       continue;
     }
     if (r->IsLarge()) {
-      // Avoid visiting dead large objects since they may contain dangling pointers to the
-      // from-space.
-      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+      // We may visit a large object with live_bytes = 0 here. However, it is
+      // safe as it cannot contain dangling pointers because corresponding regions
+      // (and regions corresponding to dead referents) cannot be allocated for new
+      // allocations without first clearing regions' live_bytes and state.
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
       DCHECK(obj->GetClass() != nullptr);
       visitor(obj);
     } else if (r->IsLargeTail()) {
       // Do nothing.
     } else {
-      // For newly allocated and evacuated regions, live bytes will be -1.
-      uint8_t* pos = r->Begin();
-      uint8_t* top = r->Top();
-      const bool need_bitmap =
-          r->LiveBytes() != static_cast<size_t>(-1) &&
-          r->LiveBytes() != static_cast<size_t>(top - pos);
-      if (need_bitmap) {
-        GetLiveBitmap()->VisitMarkedRange(
-            reinterpret_cast<uintptr_t>(pos),
-            reinterpret_cast<uintptr_t>(top),
-            visitor);
+      WalkNonLargeRegion(visitor, r);
+    }
+  }
+}
+
+template<typename Visitor>
+inline void RegionSpace::WalkNonLargeRegion(Visitor&& visitor, const Region* r) {
+  DCHECK(!r->IsLarge() && !r->IsLargeTail());
+  // For newly allocated and evacuated regions, live bytes will be -1.
+  uint8_t* pos = r->Begin();
+  uint8_t* top = r->Top();
+  // We need the region space bitmap to iterate over a region's objects
+  // if
+  // - its live bytes count is invalid (i.e. -1); or
+  // - its live bytes count is lower than the allocated bytes count.
+  //
+  // In both of the previous cases, we do not have the guarantee that
+  // all allocated objects are "alive" (i.e. valid), so we depend on
+  // the region space bitmap to identify which ones to visit.
+  //
+  // On the other hand, when all allocated bytes are known to be alive,
+  // we know that they form a range of consecutive objects (modulo
+  // object alignment constraints) that can be visited iteratively: we
+  // can compute the next object's location by using the current
+  // object's address and size (and object alignment constraints).
+  const bool need_bitmap =
+      r->LiveBytes() != static_cast<size_t>(-1) &&
+      r->LiveBytes() != static_cast<size_t>(top - pos);
+  if (need_bitmap) {
+    GetLiveBitmap()->VisitMarkedRange(
+        reinterpret_cast<uintptr_t>(pos),
+        reinterpret_cast<uintptr_t>(top),
+        visitor);
+  } else {
+    while (pos < top) {
+      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+      if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+        visitor(obj);
+        pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
       } else {
-        while (pos < top) {
-          mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
-          if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
-            visitor(obj);
-            pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
-          } else {
-            break;
-          }
-        }
+        break;
       }
     }
   }
 }
 
+template <typename Visitor>
+inline void RegionSpace::Walk(Visitor&& visitor) {
+  WalkInternal</* kToSpaceOnly= */ false>(visitor);
+}
+template <typename Visitor>
+inline void RegionSpace::WalkToSpace(Visitor&& visitor) {
+  WalkInternal</* kToSpaceOnly= */ true>(visitor);
+}
+
 inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
   const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
   return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
@@ -408,7 +477,7 @@
     } else {
       DCHECK(reg->IsLargeTail());
     }
-    reg->Clear(/*zero_and_release_pages*/true);
+    reg->Clear(/*zero_and_release_pages=*/true);
     if (kForEvac) {
       --num_evac_regions_;
     } else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index f74fa86..5179702 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -17,6 +17,7 @@
 #include "bump_pointer_space-inl.h"
 #include "bump_pointer_space.h"
 #include "base/dumpable.h"
+#include "base/logging.h"
 #include "gc/accounting/read_barrier_table.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
@@ -58,7 +59,9 @@
                                    requested_begin,
                                    capacity + kRegionSize,
                                    PROT_READ | PROT_WRITE,
-                                   /* low_4gb */ true,
+                                   /*low_4gb=*/ true,
+                                   /*reuse=*/ false,
+                                   /*reservation=*/ nullptr,
                                    &error_msg);
     if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
@@ -69,6 +72,7 @@
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
+    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
     MemMap::DumpMaps(LOG_STREAM(ERROR));
     return MemMap::Invalid();
   }
@@ -89,11 +93,12 @@
   return mem_map;
 }
 
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
-  return new RegionSpace(name, std::move(mem_map));
+RegionSpace* RegionSpace::Create(
+    const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
+  return new RegionSpace(name, std::move(mem_map), use_generational_cc);
 }
 
-RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
     : ContinuousMemMapAllocSpace(name,
                                  std::move(mem_map),
                                  mem_map.Begin(),
@@ -101,6 +106,7 @@
                                  mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       region_lock_("Region lock", kRegionSpaceRegionLock),
+      use_generational_cc_(use_generational_cc),
       time_(1U),
       num_regions_(mem_map_.Size() / kRegionSize),
       num_non_free_regions_(0U),
@@ -175,9 +181,44 @@
   return num_regions * kRegionSize;
 }
 
+void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
+  // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
+  DCHECK(GetUseGenerationalCC() || clear_live_bytes);
+  DCHECK(!IsFree() && IsInToSpace());
+  type_ = RegionType::kRegionTypeUnevacFromSpace;
+  if (IsNewlyAllocated()) {
+    // A newly allocated region set as unevac from-space must be
+    // a large or large tail region.
+    DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+    // Always clear the live bytes of a newly allocated (large or
+    // large tail) region.
+    clear_live_bytes = true;
+    // Clear the "newly allocated" status here, as we do not want the
+    // GC to see it when encountering (and processing) references in the
+    // from-space.
+    //
+    // Invariant: There should be no newly-allocated region in the
+    // from-space (when the from-space exists, which is between the calls
+    // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+    is_newly_allocated_ = false;
+  }
+  if (clear_live_bytes) {
+    // Reset the live bytes, as we have made a non-evacuation
+    // decision (possibly based on the percentage of live bytes).
+    live_bytes_ = 0;
+  }
+}
+
+bool RegionSpace::Region::GetUseGenerationalCC() {
+  // We are retrieving the info from Heap, instead of the cached version in
+  // RegionSpace, because accessing the Heap from a Region object is easier
+  // than accessing the RegionSpace.
+  return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
+}
+
 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
   // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || (evac_mode != kEvacModeNewlyAllocated));
+  DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
   DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
   // The region should be evacuated if:
   // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
@@ -249,7 +290,7 @@
 
 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
 
   // This code uses a logic similar to the one used in RegionSpace::FreeLarge
   // to traverse the regions supporting `obj`.
@@ -288,7 +329,7 @@
                                EvacMode evac_mode,
                                bool clear_live_bytes) {
   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
+  DCHECK(use_generational_cc_ || clear_live_bytes);
   ++time_;
   if (kUseTableLookupReadBarrier) {
     DCHECK(rb_table->IsAllCleared());
@@ -315,6 +356,7 @@
                 state == RegionState::kRegionStateLarge) &&
                type == RegionType::kRegionTypeToSpace);
         bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+        bool is_newly_allocated = r->IsNewlyAllocated();
         if (should_evacuate) {
           r->SetAsFromSpace();
           DCHECK(r->IsInFromSpace());
@@ -325,6 +367,15 @@
         if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                      type == RegionType::kRegionTypeToSpace)) {
           prev_large_evacuated = should_evacuate;
+          // In 2-phase full heap GC, this function is called after marking is
+          // done. So, it is possible that some newly allocated large object is
+          // marked but its live_bytes is still -1. We need to clear the
+          // mark-bit otherwise the live_bytes will not be updated in
+          // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
+          // logic.
+          if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
+            GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+          }
           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
           DCHECK_GT(num_expected_large_tails, 0U);
         }
@@ -363,7 +414,8 @@
 }
 
 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
-                                 /* out */ uint64_t* cleared_objects) {
+                                 /* out */ uint64_t* cleared_objects,
+                                 const bool clear_bitmap) {
   DCHECK(cleared_bytes != nullptr);
   DCHECK(cleared_objects != nullptr);
   *cleared_bytes = 0;
@@ -391,13 +443,18 @@
   // (see b/62194020).
   uint8_t* clear_block_begin = nullptr;
   uint8_t* clear_block_end = nullptr;
-  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
-    r->Clear(/*zero_and_release_pages*/false);
+  auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
+    r->Clear(/*zero_and_release_pages=*/false);
     if (clear_block_end != r->Begin()) {
       // Region `r` is not adjacent to the current clear block; zero and release
       // pages within the current block and restart a new clear block at the
       // beginning of region `r`.
       ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+      if (clear_bitmap) {
+        GetLiveBitmap()->ClearRange(
+            reinterpret_cast<mirror::Object*>(clear_block_begin),
+            reinterpret_cast<mirror::Object*>(clear_block_end));
+      }
       clear_block_begin = r->Begin();
     }
     // Add region `r` to the clear block.
@@ -422,20 +479,23 @@
         // It is also better to clear these regions now instead of at the end of the next GC to
         // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
         // live percent evacuation logic.
+        *cleared_bytes += r->BytesAllocated();
+        *cleared_objects += r->ObjectsAllocated();
+        clear_region(r);
         size_t free_regions = 1;
         // Also release RAM for large tails.
         while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
-          DCHECK(r->IsLarge());
           clear_region(&regions_[i + free_regions]);
           ++free_regions;
         }
-        *cleared_bytes += r->BytesAllocated();
-        *cleared_objects += r->ObjectsAllocated();
         num_non_free_regions_ -= free_regions;
-        clear_region(r);
-        GetLiveBitmap()->ClearRange(
-            reinterpret_cast<mirror::Object*>(r->Begin()),
-            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+        // When clear_bitmap is true, this clearing of bitmap is taken care in
+        // clear_region().
+        if (!clear_bitmap) {
+          GetLiveBitmap()->ClearRange(
+              reinterpret_cast<mirror::Object*>(r->Begin()),
+              reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+        }
         continue;
       }
       r->SetUnevacFromSpaceAsToSpace();
@@ -481,7 +541,7 @@
         // bitmap. But they cannot do so before we know the next GC cycle will
         // be a major one, so this operation happens at the beginning of such a
         // major collection, before marking starts.
-        if (!kEnableGenerationalConcurrentCopyingCollection) {
+        if (!use_generational_cc_) {
           GetLiveBitmap()->ClearRange(
               reinterpret_cast<mirror::Object*>(r->Begin()),
               reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
@@ -495,8 +555,7 @@
         // `r` when it has an undefined live bytes count (i.e. when
         // `r->LiveBytes() == static_cast<size_t>(-1)`) with
         // Generational CC.
-        if (!kEnableGenerationalConcurrentCopyingCollection ||
-            (r->LiveBytes() != static_cast<size_t>(-1))) {
+        if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
           // Only some allocated bytes are live in this unevac region.
           // This should only happen for an allocated non-large region.
           DCHECK(r->IsAllocated()) << r->State();
@@ -515,6 +574,11 @@
   }
   // Clear pages for the last block since clearing happens when a new block opens.
   ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+  if (clear_bitmap) {
+    GetLiveBitmap()->ClearRange(
+        reinterpret_cast<mirror::Object*>(clear_block_begin),
+        reinterpret_cast<mirror::Object*>(clear_block_end));
+  }
   // Update non_free_region_index_limit_.
   SetNonFreeRegionLimit(new_non_free_region_index_limit);
   evac_region_ = nullptr;
@@ -586,7 +650,7 @@
 
   // Functor poisoning the space between `obj` and the previously
   // visited (live) object (or the beginng of the region), if any.
-  auto maybe_poison = [this, &prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
+  auto maybe_poison = [&prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
     DCHECK_ALIGNED(obj, kAlignment);
     uint8_t* cur_obj_begin = reinterpret_cast<uint8_t*>(obj);
     if (cur_obj_begin != prev_obj_end) {
@@ -655,7 +719,7 @@
     if (!r->IsFree()) {
       --num_non_free_regions_;
     }
-    r->Clear(/*zero_and_release_pages*/true);
+    r->Clear(/*zero_and_release_pages=*/true);
   }
   SetNonFreeRegionLimit(0);
   DCHECK_EQ(num_non_free_regions_, 0u);
@@ -726,7 +790,7 @@
 void RegionSpace::RecordAlloc(mirror::Object* ref) {
   CHECK(ref != nullptr);
   Region* r = RefToRegion(ref);
-  r->objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
+  r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
 }
 
 bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
@@ -734,7 +798,7 @@
   RevokeThreadLocalBuffersLocked(self);
   // Retain sufficient free regions for full evacuation.
 
-  Region* r = AllocateRegion(/*for_evac*/ false);
+  Region* r = AllocateRegion(/*for_evac=*/ false);
   if (r != nullptr) {
     r->is_a_tlab_ = true;
     r->thread_ = self;
@@ -805,12 +869,45 @@
      << " type=" << type_
      << " objects_allocated=" << objects_allocated_
      << " alloc_time=" << alloc_time_
-     << " live_bytes=" << live_bytes_
-     << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
+     << " live_bytes=" << live_bytes_;
+
+  if (live_bytes_ != static_cast<size_t>(-1)) {
+    os << " ratio over allocated bytes="
+       << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
+    uint64_t longest_consecutive_free_bytes = GetLongestConsecutiveFreeBytes();
+    os << " longest_consecutive_free_bytes=" << longest_consecutive_free_bytes
+       << " (" << PrettySize(longest_consecutive_free_bytes) << ")";
+  }
+
+  os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
      << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
      << " thread=" << thread_ << '\n';
 }
 
+uint64_t RegionSpace::Region::GetLongestConsecutiveFreeBytes() const {
+  if (IsFree()) {
+    return kRegionSize;
+  }
+  if (IsLarge() || IsLargeTail()) {
+    return 0u;
+  }
+  uintptr_t max_gap = 0u;
+  uintptr_t prev_object_end = reinterpret_cast<uintptr_t>(Begin());
+  // Iterate through all live objects and find the largest free gap.
+  auto visitor = [&max_gap, &prev_object_end](mirror::Object* obj)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+    uintptr_t current = reinterpret_cast<uintptr_t>(obj);
+    uintptr_t diff = current - prev_object_end;
+    max_gap = std::max(diff, max_gap);
+    uintptr_t object_end = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+    prev_object_end = RoundUp(object_end, kAlignment);
+  };
+  space::RegionSpace* region_space = art::Runtime::Current()->GetHeap()->GetRegionSpace();
+  region_space->WalkNonLargeRegion(visitor, this);
+  return static_cast<uint64_t>(max_gap);
+}
+
+
 size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
   size_t num_bytes = obj->SizeOf();
   if (usable_size != nullptr) {
@@ -855,7 +952,7 @@
     Region* r = &regions_[region_index];
     if (r->IsFree()) {
       r->Unfree(this, time_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         // TODO: Add an explanation for this assertion.
         DCHECK(!for_evac || !r->is_newly_allocated_);
       }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 0bf4f38..d8b54e2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -35,8 +35,10 @@
 // will not try to allocate a new region from the beginning of the
 // region space, but from the last allocated region. This allocation
 // strategy reduces region reuse and should help catch some GC bugs
-// earlier.
-static constexpr bool kCyclicRegionAllocation = true;
+// earlier. However, cyclic region allocation can also create memory
+// fragmentation at the region level (see b/33795328); therefore, we
+// only enable it in debug mode.
+static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;
 
 // A space that consists of equal-sized regions.
 class RegionSpace final : public ContinuousMemMapAllocSpace {
@@ -57,7 +59,7 @@
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
   static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
+  static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   // Allocate `num_bytes`, returns null if the space is full.
   mirror::Object* Alloc(Thread* self,
@@ -203,14 +205,18 @@
 
   // Go through all of the blocks and visit the continuous objects.
   template <typename Visitor>
-  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<false /* kToSpaceOnly */>(visitor);
-  }
+  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
   template <typename Visitor>
-  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
-      REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<true /* kToSpaceOnly */>(visitor);
-  }
+  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
+
+  // Scans regions and calls visitor for objects in unevac-space corresponding
+  // to the bits set in 'bitmap'.
+  // Cannot acquire region_lock_ as visitor may need to acquire it for allocation.
+  // Should not be called concurrently with functions (like SetFromSpace()) which
+  // change regions' type.
+  template <typename Visitor>
+  ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+                                         Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
     return nullptr;
@@ -231,6 +237,11 @@
     return false;
   }
 
+  bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK_LT(idx, num_regions_);
+    return regions_[idx].IsNewlyAllocated();
+  }
+
   bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
     if (HasAddress(ref)) {
       Region* r = RefToRegionUnlocked(ref);
@@ -294,7 +305,9 @@
   size_t FromSpaceSize() REQUIRES(!region_lock_);
   size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
   size_t ToSpaceSize() REQUIRES(!region_lock_);
-  void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+  void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+                      /* out */ uint64_t* cleared_objects,
+                      const bool clear_bitmap)
       REQUIRES(!region_lock_);
 
   void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -313,6 +326,40 @@
     }
   }
 
+  void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+    MutexLock mu(Thread::Current(), region_lock_);
+    const size_t iter_limit = kUseTableLookupReadBarrier
+        ? num_regions_
+        : std::min(num_regions_, non_free_region_index_limit_);
+    for (size_t i = 0; i < iter_limit; ++i) {
+      Region* r = &regions_[i];
+      // Newly allocated regions don't need up-to-date live_bytes_ for deciding
+      // whether to be evacuated or not. See Region::ShouldBeEvacuated().
+      if (!r->IsFree() && !r->IsNewlyAllocated()) {
+        r->ZeroLiveBytes();
+      }
+    }
+  }
+
+  size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK(HasAddress(ref));
+    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+    size_t reg_idx = offset / kRegionSize;
+    DCHECK_LT(reg_idx, num_regions_);
+    Region* reg = &regions_[reg_idx];
+    DCHECK_EQ(reg->Idx(), reg_idx);
+    DCHECK(reg->Contains(ref));
+    return reg_idx;
+  }
+  // Return -1 as region index for references outside this region space.
+  size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+    if (HasAddress(ref)) {
+      return RegionIdxForRefUnchecked(ref);
+    } else {
+      return static_cast<size_t>(-1);
+    }
+  }
+
   void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
   bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
 
@@ -321,10 +368,7 @@
   }
 
  private:
-  RegionSpace(const std::string& name, MemMap&& mem_map);
-
-  template<bool kToSpaceOnly, typename Visitor>
-  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+  RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   class Region {
    public:
@@ -479,33 +523,7 @@
     // collection, RegionSpace::ClearFromSpace will preserve the space
     // used by this region, and tag it as to-space (see
     // Region::SetUnevacFromSpaceAsToSpace below).
-    void SetAsUnevacFromSpace(bool clear_live_bytes) {
-      // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-      DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
-      DCHECK(!IsFree() && IsInToSpace());
-      type_ = RegionType::kRegionTypeUnevacFromSpace;
-      if (IsNewlyAllocated()) {
-        // A newly allocated region set as unevac from-space must be
-        // a large or large tail region.
-        DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
-        // Always clear the live bytes of a newly allocated (large or
-        // large tail) region.
-        clear_live_bytes = true;
-        // Clear the "newly allocated" status here, as we do not want the
-        // GC to see it when encountering (and processing) references in the
-        // from-space.
-        //
-        // Invariant: There should be no newly-allocated region in the
-        // from-space (when the from-space exists, which is between the calls
-        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
-        is_newly_allocated_ = false;
-      }
-      if (clear_live_bytes) {
-        // Reset the live bytes, as we have made a non-evacuation
-        // decision (possibly based on the percentage of live bytes).
-        live_bytes_ = 0;
-      }
-    }
+    void SetAsUnevacFromSpace(bool clear_live_bytes);
 
     // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
     // This is only valid if it is currently an unevac from-space region.
@@ -518,11 +536,10 @@
     ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
 
     void AddLiveBytes(size_t live_bytes) {
-      DCHECK(IsInUnevacFromSpace());
+      DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace());
       DCHECK(!IsLargeTail());
       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
-      // For large allocations, we always consider all bytes in the
-      // regions live.
+      // For large allocations, we always consider all bytes in the regions live.
       live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
       DCHECK_LE(live_bytes_, BytesAllocated());
     }
@@ -570,7 +587,11 @@
       DCHECK_LE(Top(), end_);
     }
 
+    uint64_t GetLongestConsecutiveFreeBytes() const;
+
    private:
+    static bool GetUseGenerationalCC();
+
     size_t idx_;                        // The region's index in the region space.
     size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
     uint8_t* begin_;                    // The begin address of the region.
@@ -580,6 +601,8 @@
     // (large region + one or more large tail regions).
     Atomic<uint8_t*> top_;              // The current position of the allocation.
     uint8_t* end_;                      // The end address of the region.
+    // objects_allocated_ is accessed using memory_order_relaxed. Treat as approximate when there
+    // are concurrent updates.
     Atomic<size_t> objects_allocated_;  // The number of objects allocated.
     uint32_t alloc_time_;               // The allocation time of the region.
     // Note that newly allocated and evacuated regions use -1 as
@@ -592,6 +615,14 @@
     friend class RegionSpace;
   };
 
+  template<bool kToSpaceOnly, typename Visitor>
+  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
+  // Visitor will be iterating on objects in increasing address order.
+  template<typename Visitor>
+  ALWAYS_INLINE void WalkNonLargeRegion(Visitor&& visitor, const Region* r)
+      NO_THREAD_SAFETY_ANALYSIS;
+
   Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
     MutexLock mu(Thread::Current(), region_lock_);
     return RefToRegionLocked(ref);
@@ -626,7 +657,7 @@
   // - the region containing `obj` is fully used; and
   // - `obj` is not the last object of that region;
   // the returned location is not guaranteed to be a valid object.
-  mirror::Object* GetNextObject(mirror::Object* obj)
+  static mirror::Object* GetNextObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
@@ -683,6 +714,8 @@
 
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  // Cached version of Heap::use_generational_cc_.
+  const bool use_generational_cc_;
   uint32_t time_;                  // The time as the number of collections since the startup.
   size_t num_regions_;             // The number of regions in this space.
   // The number of non-free regions in this space.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 10ff1c1..36fd864 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -133,17 +133,19 @@
   delete rosalloc_;
 }
 
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
-                                     bool low_memory_mode, bool can_move_objects) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name,
+                                     size_t initial_size,
+                                     size_t growth_limit,
+                                     size_t capacity,
+                                     bool low_memory_mode,
+                                     bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     start_time = NanoTime();
     VLOG(startup) << "RosAllocSpace::Create entering " << name
                   << " initial_size=" << PrettySize(initial_size)
                   << " growth_limit=" << PrettySize(growth_limit)
-                  << " capacity=" << PrettySize(capacity)
-                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+                  << " capacity=" << PrettySize(capacity);
   }
 
   // Memory we promise to rosalloc before it asks for morecore.
@@ -151,8 +153,7 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = Heap::kDefaultStartingSize;
-  MemMap mem_map =
-      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 5162a06..9e95c16 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -38,8 +38,11 @@
   // base address is not guaranteed to be granted, if it is required,
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
-  static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
+  static RosAllocSpace* Create(const std::string& name,
+                               size_t initial_size,
+                               size_t growth_limit,
+                               size_t capacity,
+                               bool low_memory_mode,
                                bool can_move_objects);
   static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
                                          const std::string& name,
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index ca3aff4..f0b3231 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -16,19 +16,26 @@
 
 #include "space_test.h"
 
+#include "rosalloc_space.h"
+
 namespace art {
 namespace gc {
 namespace space {
 
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
-                               Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return RosAllocSpace::Create(name,
+                               initial_size,
+                               growth_limit,
+                               capacity,
+                               Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+                               /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index a78623e..d7e7e90 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -16,19 +16,25 @@
 
 #include "space_test.h"
 
+#include "rosalloc_space.h"
+
 namespace art {
 namespace gc {
 namespace space {
 
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
-                               Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return RosAllocSpace::Create(name, initial_size,
+                               growth_limit,
+                               capacity,
+                               Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+                               /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index a8bd7b8..e7961eb 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -63,7 +63,7 @@
 
 RegionSpace* Space::AsRegionSpace() {
   LOG(FATAL) << "Unreachable";
-  return nullptr;
+  UNREACHABLE();
 }
 
 AllocSpace* Space::AsAllocSpace() {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 545e3d8..903263f 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -21,12 +21,12 @@
 #include <string>
 
 #include "base/atomic.h"
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/accounting/space_bitmap.h"
 #include "gc/collector/object_byte_pair.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index ca5f306..d3db679 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -34,25 +34,22 @@
   MallocSpace* CreateSpace(const std::string& name,
                            size_t initial_size,
                            size_t growth_limit,
-                           size_t capacity,
-                           uint8_t* requested_begin) {
+                           size_t capacity) {
     const MallocSpaceType type = GetParam();
     if (type == kMallocSpaceDlMalloc) {
       return DlMallocSpace::Create(name,
                                    initial_size,
                                    growth_limit,
                                    capacity,
-                                   requested_begin,
-                                   false);
+                                   /*can_move_objects=*/ false);
     }
     DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc));
     return RosAllocSpace::Create(name,
                                  initial_size,
                                  growth_limit,
                                  capacity,
-                                 requested_begin,
                                  Runtime::Current()->GetHeap()->IsLowMemoryMode(),
-                                 false);
+                                 /*can_move_objects=*/ false);
   }
 };
 
@@ -62,25 +59,25 @@
 
   {
     // Init < max == growth
-    std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+    std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init == max == growth
-    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init > max == growth
-    space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB));
     EXPECT_TRUE(space == nullptr);
     // Growth == init < max
-    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Growth < init < max
-    space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB));
     EXPECT_TRUE(space == nullptr);
     // Init < growth < max
-    space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init < max < growth
-    space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB));
     EXPECT_TRUE(space == nullptr);
   }
 }
@@ -91,7 +88,7 @@
 // the GC works with the ZygoteSpace.
 TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
   size_t dummy;
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -225,7 +222,7 @@
 
 TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
   size_t dummy = 0;
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
@@ -301,7 +298,7 @@
 }
 
 TEST_P(SpaceCreateTest, AllocAndFreeListTestBody) {
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index c94b666..7fbd0b5 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -20,12 +20,13 @@
 #include <stdint.h>
 #include <memory>
 
-#include "base/globals.h"
 #include "common_runtime_test.h"
+#include "handle_scope-inl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
+#include "runtime_globals.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread_list.h"
 #include "zygote_space.h"
@@ -122,8 +123,10 @@
     return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
   }
 
-  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
-                                        size_t capacity, uint8_t* requested_begin);
+  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+                                        size_t initial_size,
+                                        size_t growth_limit,
+                                        size_t capacity);
 
   void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                            int round, size_t growth_limit);
@@ -322,7 +325,7 @@
   size_t initial_size = 4 * MB;
   size_t growth_limit = 8 * MB;
   size_t capacity = 16 * MB;
-  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
   ASSERT_TRUE(space != nullptr);
 
   // Basic sanity
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index ed85b06..f482466 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -127,7 +127,7 @@
     // Need to mark the card since this will update the mod-union table next GC cycle.
     card_table->MarkCard(ptrs[i]);
   }
-  zygote_space->objects_allocated_.fetch_sub(num_ptrs, std::memory_order_seq_cst);
+  zygote_space->objects_allocated_.fetch_sub(num_ptrs);
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 1f73577..03e2ec8 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -68,7 +68,7 @@
   }
 
   uint64_t GetObjectsAllocated() {
-    return objects_allocated_.load(std::memory_order_seq_cst);
+    return objects_allocated_.load();
   }
 
   void Clear() override;
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 07725b9..4fe8027 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -112,6 +112,8 @@
   switch (type) {
     case CollectorType::kCollectorTypeCMS:
     case CollectorType::kCollectorTypeCC:
+    case CollectorType::kCollectorTypeSS:
+    case CollectorType::kCollectorTypeGSS:
       return true;
 
     default:
@@ -143,7 +145,7 @@
   cswh.Set(GcRoot<mirror::Object>(s.Get()));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -164,7 +166,7 @@
   cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -188,7 +190,7 @@
   cswh.Set(GcRoot<mirror::Object>(s.Get()));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -203,7 +205,7 @@
   Runtime::Current()->RemoveSystemWeakHolder(&cswh);
 
   // Trigger another GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expectation: no change in the numbers.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 6db3c37..86e36ab 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -20,8 +20,8 @@
 #include <memory>
 #include <set>
 
-#include "base/globals.h"
 #include "base/mutex.h"
+#include "runtime_globals.h"
 #include "thread_pool.h"
 
 namespace art {
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 5d234ea..8670a22 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -21,6 +21,7 @@
 
 #include "art_field-inl.h"
 #include "base/file_utils.h"
+#include "base/logging.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 
@@ -58,8 +59,8 @@
     oss << " klass=" << klass;
     if (IsValidClass(klass)) {
       oss << "(" << klass->PrettyClass() << ")";
-      if (klass->IsArrayClass<kVerifyNone, kWithoutReadBarrier>()) {
-        oss << " length=" << obj->AsArray<kVerifyNone, kWithoutReadBarrier>()->GetLength();
+      if (klass->IsArrayClass<kVerifyNone>()) {
+        oss << " length=" << obj->AsArray<kVerifyNone>()->GetLength();
       }
     } else {
       oss << " <invalid address>";
@@ -87,7 +88,8 @@
                                      bool fatal) const {
   // Lowest priority logging first:
   PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
-  MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+  MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
+  Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
   // Buffer the output in the string stream since it is more important than the stack traces
   // and we want it to have log priority. The stack traces are printed from Runtime::Abort
   // which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0bd43f9..32af62d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_ROOT_H_
 #define ART_RUNTIME_GC_ROOT_H_
 
+#include "base/locks.h"       // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"       // For Locks::mutator_lock_.
 #include "mirror/object_reference.h"
 #include "read_barrier_option.h"
 
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
deleted file mode 100644
index 464c2b7..0000000
--- a/runtime/generated/asm_support_gen.h
+++ /dev/null
@@ -1,173 +0,0 @@
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
-// This file has been auto-generated by cpp-define-generator; do not edit directly.
-
-#define STACK_REFERENCE_SIZE 0x4
-DEFINE_CHECK_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::StackReference<art::mirror::Object>))))
-#define COMPRESSED_REFERENCE_SIZE 0x4
-DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::mirror::CompressedReference<art::mirror::Object>))))
-#define COMPRESSED_REFERENCE_SIZE_SHIFT 0x2
-DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE_SHIFT), (static_cast<size_t>(art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))))
-#define RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveAllCalleeSaves))))
-#define RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsOnly))))
-#define RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET 0x10
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsAndArgs))))
-#define RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET 0x18
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverything))))
-#define RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET 0x20
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForClinit))))
-#define RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET 0x28
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))))
-#define THREAD_FLAGS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_ID_OFFSET 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_IS_GC_MARKING_OFFSET 52
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_CARD_TABLE_OFFSET 136
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
-#define MIRROR_CLASS_DEX_CACHE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class::DexCacheOffset().Int32Value())))
-#define MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET 48
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())))
-#define MIRROR_OBJECT_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object::ClassOffset().Int32Value())))
-#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object::MonitorOffset().Int32Value())))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), (static_cast<uint32_t>((art::kAccClassIsFinalizable))))
-#define ACCESS_FLAGS_CLASS_IS_INTERFACE 0x200
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_INTERFACE), (static_cast<uint32_t>((art::kAccInterface))))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
-#define ART_METHOD_JNI_OFFSET_32 20
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
-#define ART_METHOD_JNI_OFFSET_64 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
-#define ART_METHOD_QUICK_CODE_OFFSET_32 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
-#define ART_METHOD_QUICK_CODE_OFFSET_64 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
-#define ART_METHOD_DECLARING_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod::DeclaringClassOffset().Int32Value())))
-#define ART_METHOD_ACCESS_FLAGS_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod::AccessFlagsOffset().Int32Value())))
-#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
-#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
-#define STRING_DEX_CACHE_HASH_BITS 10
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
-#define STRING_DEX_CACHE_ELEMENT_SIZE 8
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
-#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
-DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
-#define METHOD_DEX_CACHE_HASH_BITS 10
-DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
-#define CARD_TABLE_CARD_SHIFT 0xa
-DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
-#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
-DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
-#define LOCK_WORD_STATE_SHIFT 30
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
-#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
-#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
-#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
-#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddressOverflow)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 0x3
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), (static_cast<uint32_t>(art::LockWord::kForwardingAddressShift)))
-#define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
-#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
-#define LOCK_WORD_GC_STATE_SIZE 2
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
-#define LOCK_WORD_GC_STATE_SHIFT 28
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
-#define LOCK_WORD_MARK_BIT_SHIFT 29
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_MARK_BIT_SHIFT), (static_cast<int32_t>(art::LockWord::kMarkBitStateShift)))
-#define LOCK_WORD_MARK_BIT_MASK_SHIFTED 0x20000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_MARK_BIT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kMarkBitStateMaskShifted)))
-#define STD_MEMORY_ORDER_RELAXED 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(STD_MEMORY_ORDER_RELAXED), (static_cast<int32_t>(std::memory_order_relaxed)))
-#define OBJECT_ALIGNMENT_MASK 0x7
-DEFINE_CHECK_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), (static_cast<size_t>(art::kObjectAlignment - 1)))
-#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
-DEFINE_CHECK_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED), (static_cast<uint32_t>(~static_cast<uint32_t>(art::kObjectAlignment - 1))))
-#define OBJECT_ALIGNMENT_MASK_TOGGLED64 0xfffffffffffffff8
-DEFINE_CHECK_EQ(static_cast<uint64_t>(OBJECT_ALIGNMENT_MASK_TOGGLED64), (static_cast<uint64_t>(~static_cast<uint64_t>(art::kObjectAlignment - 1))))
-#define ACC_OBSOLETE_METHOD 262144
-DEFINE_CHECK_EQ(static_cast<int32_t>(ACC_OBSOLETE_METHOD), (static_cast<int32_t>(art::kAccObsoleteMethod)))
-#define ACC_OBSOLETE_METHOD_SHIFT 18
-DEFINE_CHECK_EQ(static_cast<int32_t>(ACC_OBSOLETE_METHOD_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(art::kAccObsoleteMethod))))
-#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 7
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK), (static_cast<int32_t>((static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff8
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32), (static_cast<uint32_t>((~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff8
-DEFINE_CHECK_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64), (static_cast<uint64_t>((~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListOffset()))))
-#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))))
-#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))))
-#define ROSALLOC_SLOT_NEXT_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_SLOT_NEXT_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunSlotNextOffset()))))
-#define THREAD_SUSPEND_REQUEST 1
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
-#define THREAD_CHECKPOINT_REQUEST 2
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
-#define THREAD_EMPTY_CHECKPOINT_REQUEST 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_EMPTY_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kEmptyCheckpointRequest))))
-#define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST 7
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest))))
-#define JIT_CHECK_OSR (-1)
-DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_CHECK_OSR), (static_cast<int16_t>((art::jit::kJitCheckForOSR))))
-#define JIT_HOTNESS_DISABLE (-2)
-DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_HOTNESS_DISABLE), (static_cast<int16_t>((art::jit::kJitHotnessDisabled))))
-
-#endif  // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
diff --git a/runtime/handle.h b/runtime/handle.h
index 18e503d..0c9c029 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "jni.h"
 #include "obj_ptr.h"
@@ -62,8 +62,9 @@
     return down_cast<T*>(reference_->AsMirrorPtr());
   }
 
-  ALWAYS_INLINE bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return Get() == nullptr;
+  ALWAYS_INLINE bool IsNull() const {
+    // It's safe to null-check it without a read barrier.
+    return reference_->IsNull();
   }
 
   ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index d091e7f..765ed7d 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -21,6 +21,7 @@
 
 #include "base/mutex.h"
 #include "handle.h"
+#include "handle_wrapper.h"
 #include "obj_ptr-inl.h"
 #include "thread-current-inl.h"
 #include "verify_object.h"
@@ -106,6 +107,15 @@
       handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
 }
 
+template <typename Visitor>
+inline void HandleScope::VisitRoots(Visitor& visitor) {
+  for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+    // GetReference returns a pointer to the stack reference within the handle scope. If this
+    // needs to be updated, it will be done by the root visitor.
+    visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
+  }
+}
+
 template<size_t kNumReferences> template<class T>
 inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
   SetReference(pos_, object);
@@ -199,7 +209,7 @@
 inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
     : BaseHandleScope(self->GetTopHandleScope()),
       self_(self) {
-  current_scope_ = new LocalScopeType(/*link*/ nullptr);
+  current_scope_ = new LocalScopeType(/*link=*/ nullptr);
   self_->PushHandleScope(this);
 }
 
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 9eaf1ec..5a6f1ac 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,15 +22,17 @@
 #include <android-base/logging.h>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
-#include "handle.h"
 #include "stack_reference.h"
-#include "verify_object.h"
 
 namespace art {
 
+template<class T> class Handle;
 class HandleScope;
+template<class T> class HandleWrapper;
+template<class T> class HandleWrapperObjPtr;
+template<class T> class MutableHandle;
 template<class MirrorType> class ObjPtr;
 class Thread;
 class VariableSizedHandleScope;
@@ -144,13 +146,7 @@
   }
 
   template <typename Visitor>
-  void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
-      // GetReference returns a pointer to the stack reference within the handle scope. If this
-      // needs to be updated, it will be done by the root visitor.
-      visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
-    }
-  }
+  ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
   // Return backing storage used for references.
@@ -172,44 +168,6 @@
   DISALLOW_COPY_AND_ASSIGN(HandleScope);
 };
 
-// A wrapper which wraps around Object** and restores the pointer in the destructor.
-// TODO: Delete
-template<class T>
-class HandleWrapper : public MutableHandle<T> {
- public:
-  HandleWrapper(T** obj, const MutableHandle<T>& handle)
-     : MutableHandle<T>(handle), obj_(obj) {
-  }
-
-  HandleWrapper(const HandleWrapper&) = default;
-
-  ~HandleWrapper() {
-    *obj_ = MutableHandle<T>::Get();
-  }
-
- private:
-  T** const obj_;
-};
-
-
-// A wrapper which wraps around ObjPtr<Object>* and restores the pointer in the destructor.
-// TODO: Add more functionality.
-template<class T>
-class HandleWrapperObjPtr : public MutableHandle<T> {
- public:
-  HandleWrapperObjPtr(ObjPtr<T>* obj, const MutableHandle<T>& handle)
-      : MutableHandle<T>(handle), obj_(obj) {}
-
-  HandleWrapperObjPtr(const HandleWrapperObjPtr&) = default;
-
-  ~HandleWrapperObjPtr() {
-    *obj_ = ObjPtr<T>(MutableHandle<T>::Get());
-  }
-
- private:
-  ObjPtr<T>* const obj_;
-};
-
 // Fixed size handle scope that is not necessarily linked in the thread.
 template<size_t kNumReferences>
 class PACKED(4) FixedSizeHandleScope : public HandleScope {
@@ -300,20 +258,17 @@
   void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  static constexpr size_t kLocalScopeSize = 64u;
-  static constexpr size_t kSizeOfReferencesPerScope =
-      kLocalScopeSize
-          - /* BaseHandleScope::link_ */ sizeof(BaseHandleScope*)
-          - /* BaseHandleScope::number_of_references_ */ sizeof(int32_t)
-          - /* FixedSizeHandleScope<>::pos_ */ sizeof(uint32_t);
-  static constexpr size_t kNumReferencesPerScope =
-      kSizeOfReferencesPerScope / sizeof(StackReference<mirror::Object>);
+  static constexpr size_t kMaxLocalScopeSize = 64u;
+  // In order to have consistent compilation with both 32bit and 64bit dex2oat
+  // binaries we need this to be an actual constant. We picked this because it
+  // will ensure that we use <64bit internal scopes.
+  static constexpr size_t kNumReferencesPerScope = 12u;
 
   Thread* const self_;
 
   // Linked list of fixed size handle scopes.
   using LocalScopeType = FixedSizeHandleScope<kNumReferencesPerScope>;
-  static_assert(sizeof(LocalScopeType) == kLocalScopeSize, "Unexpected size of LocalScopeType");
+  static_assert(sizeof(LocalScopeType) <= kMaxLocalScopeSize, "Unexpected size of LocalScopeType");
   LocalScopeType* current_scope_;
 
   DISALLOW_COPY_AND_ASSIGN(VariableSizedHandleScope);
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index f888482..d72dbe6 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -22,6 +22,7 @@
 #include "gtest/gtest.h"
 #include "handle.h"
 #include "handle_scope-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
 #include "scoped_thread_state_change-inl.h"
diff --git a/runtime/handle_wrapper.h b/runtime/handle_wrapper.h
new file mode 100644
index 0000000..01252c7
--- /dev/null
+++ b/runtime/handle_wrapper.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_WRAPPER_H_
+#define ART_RUNTIME_HANDLE_WRAPPER_H_
+
+#include "handle.h"
+#include "obj_ptr.h"
+
+namespace art {
+
+// A wrapper which wraps around Object** and restores the pointer in the destructor.
+// TODO: Delete
+template<class T>
+class HandleWrapper : public MutableHandle<T> {
+ public:
+  HandleWrapper(T** obj, const MutableHandle<T>& handle)
+     : MutableHandle<T>(handle), obj_(obj) {
+  }
+
+  HandleWrapper(const HandleWrapper&) = default;
+
+  ~HandleWrapper() {
+    *obj_ = MutableHandle<T>::Get();
+  }
+
+ private:
+  T** const obj_;
+};
+
+
+// A wrapper which wraps around ObjPtr<Object>* and restores the pointer in the destructor.
+// TODO: Add more functionality.
+template<class T>
+class HandleWrapperObjPtr : public MutableHandle<T> {
+ public:
+  HandleWrapperObjPtr(ObjPtr<T>* obj, const MutableHandle<T>& handle)
+      : MutableHandle<T>(handle), obj_(obj) {}
+
+  HandleWrapperObjPtr(const HandleWrapperObjPtr&) = default;
+
+  ~HandleWrapperObjPtr() {
+    *obj_ = ObjPtr<T>(MutableHandle<T>::Get());
+  }
+
+ private:
+  ObjPtr<T>* const obj_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_HANDLE_WRAPPER_H_
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 5729800..1279997 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -18,19 +18,17 @@
 
 #include <nativehelper/scoped_local_ref.h>
 
+#include "art_field-inl.h"
+#include "art_method-inl.h"
 #include "base/dumpable.h"
-#include "thread-current-inl.h"
+#include "class_root.h"
+#include "dex/class_accessor-inl.h"
+#include "dex/dex_file_loader.h"
+#include "mirror/class_ext.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
 #include "well_known_classes.h"
 
-#ifdef ART_TARGET_ANDROID
-#include <metricslogger/metrics_logger.h>
-using android::metricslogger::ComplexEventLogger;
-using android::metricslogger::ACTION_HIDDEN_API_ACCESSED;
-using android::metricslogger::FIELD_HIDDEN_API_ACCESS_METHOD;
-using android::metricslogger::FIELD_HIDDEN_API_ACCESS_DENIED;
-using android::metricslogger::FIELD_HIDDEN_API_SIGNATURE;
-#endif
-
 namespace art {
 namespace hiddenapi {
 
@@ -44,38 +42,46 @@
 
 static inline std::ostream& operator<<(std::ostream& os, AccessMethod value) {
   switch (value) {
-    case kNone:
+    case AccessMethod::kNone:
       LOG(FATAL) << "Internal access to hidden API should not be logged";
       UNREACHABLE();
-    case kReflection:
+    case AccessMethod::kReflection:
       os << "reflection";
       break;
-    case kJNI:
+    case AccessMethod::kJNI:
       os << "JNI";
       break;
-    case kLinking:
+    case AccessMethod::kLinking:
       os << "linking";
       break;
   }
   return os;
 }
 
-static constexpr bool EnumsEqual(EnforcementPolicy policy, HiddenApiAccessFlags::ApiList apiList) {
-  return static_cast<int>(policy) == static_cast<int>(apiList);
+static inline std::ostream& operator<<(std::ostream& os, const AccessContext& value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (!value.GetClass().IsNull()) {
+    std::string tmp;
+    os << value.GetClass()->GetDescriptor(&tmp);
+  } else if (value.GetDexFile() != nullptr) {
+    os << value.GetDexFile()->GetLocation();
+  } else {
+    os << "<unknown_caller>";
+  }
+  return os;
 }
 
-// GetMemberAction-related static_asserts.
-static_assert(
-    EnumsEqual(EnforcementPolicy::kDarkGreyAndBlackList, HiddenApiAccessFlags::kDarkGreylist) &&
-    EnumsEqual(EnforcementPolicy::kBlacklistOnly, HiddenApiAccessFlags::kBlacklist),
-    "Mismatch between EnforcementPolicy and ApiList enums");
-static_assert(
-    EnforcementPolicy::kJustWarn < EnforcementPolicy::kDarkGreyAndBlackList &&
-    EnforcementPolicy::kDarkGreyAndBlackList < EnforcementPolicy::kBlacklistOnly,
-    "EnforcementPolicy values ordering not correct");
-
 namespace detail {
 
+// Do not change the values of items in this enum, as they are written to the
+// event log for offline analysis. Any changes will interfere with that analysis.
+enum AccessContextFlags {
+  // Accessed member is a field if this bit is set, else a method
+  kMemberIsField = 1 << 0,
+  // Indicates if access was denied to the member, instead of just printing a warning.
+  kAccessDenied  = 1 << 1,
+};
+
 MemberSignature::MemberSignature(ArtField* field) {
   class_name_ = field->GetDeclaringClass()->GetDescriptor(&tmp_);
   member_name_ = field->GetName();
@@ -94,6 +100,24 @@
   type_ = kMethod;
 }
 
+MemberSignature::MemberSignature(const ClassAccessor::Field& field) {
+  const DexFile& dex_file = field.GetDexFile();
+  const dex::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+  class_name_ = dex_file.GetFieldDeclaringClassDescriptor(field_id);
+  member_name_ = dex_file.GetFieldName(field_id);
+  type_signature_ = dex_file.GetFieldTypeDescriptor(field_id);
+  type_ = kField;
+}
+
+MemberSignature::MemberSignature(const ClassAccessor::Method& method) {
+  const DexFile& dex_file = method.GetDexFile();
+  const dex::MethodId& method_id = dex_file.GetMethodId(method.GetIndex());
+  class_name_ = dex_file.GetMethodDeclaringClassDescriptor(method_id);
+  member_name_ = dex_file.GetMethodName(method_id);
+  type_signature_ = dex_file.GetMethodSignature(method_id).ToString();
+  type_ = kMethod;
+}
+
 inline std::vector<const char*> MemberSignature::GetSignatureParts() const {
   if (type_ == kField) {
     return { class_name_.c_str(), "->", member_name_.c_str(), ":", type_signature_.c_str() };
@@ -133,160 +157,69 @@
   }
 }
 
-void MemberSignature::WarnAboutAccess(AccessMethod access_method,
-                                      HiddenApiAccessFlags::ApiList list) {
+void MemberSignature::WarnAboutAccess(AccessMethod access_method, hiddenapi::ApiList list) {
   LOG(WARNING) << "Accessing hidden " << (type_ == kField ? "field " : "method ")
                << Dumpable<MemberSignature>(*this) << " (" << list << ", " << access_method << ")";
 }
-#ifdef ART_TARGET_ANDROID
-// Convert an AccessMethod enum to a value for logging from the proto enum.
-// This method may look odd (the enum values are current the same), but it
-// prevents coupling the internal enum to the proto enum (which should never
-// be changed) so that we are free to change the internal one if necessary in
-// future.
-inline static int32_t GetEnumValueForLog(AccessMethod access_method) {
-  switch (access_method) {
-    case kNone:
-      return android::metricslogger::ACCESS_METHOD_NONE;
-    case kReflection:
-      return android::metricslogger::ACCESS_METHOD_REFLECTION;
-    case kJNI:
-      return android::metricslogger::ACCESS_METHOD_JNI;
-    case kLinking:
-      return android::metricslogger::ACCESS_METHOD_LINKING;
-    default:
-      DCHECK(false);
-  }
-}
-#endif
 
-void MemberSignature::LogAccessToEventLog(AccessMethod access_method, Action action_taken) {
+bool MemberSignature::Equals(const MemberSignature& other) {
+  return type_ == other.type_ &&
+         class_name_ == other.class_name_ &&
+         member_name_ == other.member_name_ &&
+         type_signature_ == other.type_signature_;
+}
+
+bool MemberSignature::MemberNameAndTypeMatch(const MemberSignature& other) {
+  return member_name_ == other.member_name_ && type_signature_ == other.type_signature_;
+}
+
+void MemberSignature::LogAccessToEventLog(AccessMethod access_method, bool access_denied) {
 #ifdef ART_TARGET_ANDROID
-  if (access_method == kLinking || access_method == kNone) {
+  if (access_method == AccessMethod::kLinking || access_method == AccessMethod::kNone) {
     // Linking warnings come from static analysis/compilation of the bytecode
     // and can contain false positives (i.e. code that is never run). We choose
     // not to log these in the event log.
     // None does not correspond to actual access, so should also be ignored.
     return;
   }
-  ComplexEventLogger log_maker(ACTION_HIDDEN_API_ACCESSED);
-  log_maker.AddTaggedData(FIELD_HIDDEN_API_ACCESS_METHOD, GetEnumValueForLog(access_method));
-  if (action_taken == kDeny) {
-    log_maker.AddTaggedData(FIELD_HIDDEN_API_ACCESS_DENIED, 1);
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsAotCompiler()) {
+    return;
   }
+  JNIEnvExt* env = Thread::Current()->GetJniEnv();
   const std::string& package_name = Runtime::Current()->GetProcessPackageName();
-  if (!package_name.empty()) {
-    log_maker.SetPackageName(package_name);
+  ScopedLocalRef<jstring> package_str(env, env->NewStringUTF(package_name.c_str()));
+  if (env->ExceptionCheck()) {
+    env->ExceptionClear();
+    LOG(ERROR) << "Unable to allocate string for package name which called hidden api";
   }
   std::ostringstream signature_str;
   Dump(signature_str);
-  log_maker.AddTaggedData(FIELD_HIDDEN_API_SIGNATURE, signature_str.str());
-  log_maker.Record();
+  ScopedLocalRef<jstring> signature_jstr(env,
+      env->NewStringUTF(signature_str.str().c_str()));
+  if (env->ExceptionCheck()) {
+    env->ExceptionClear();
+    LOG(ERROR) << "Unable to allocate string for hidden api method signature";
+  }
+  env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
+      WellKnownClasses::dalvik_system_VMRuntime_hiddenApiUsed, package_str.get(),
+      signature_jstr.get(), static_cast<jint>(access_method), access_denied);
+  if (env->ExceptionCheck()) {
+    env->ExceptionClear();
+    LOG(ERROR) << "Unable to report hidden api usage";
+  }
 #else
   UNUSED(access_method);
-  UNUSED(action_taken);
+  UNUSED(access_denied);
 #endif
 }
 
-static ALWAYS_INLINE bool CanUpdateMemberAccessFlags(ArtField*) {
-  return true;
-}
-
-static ALWAYS_INLINE bool CanUpdateMemberAccessFlags(ArtMethod* method) {
-  return !method->IsIntrinsic();
-}
-
-template<typename T>
-static ALWAYS_INLINE void MaybeWhitelistMember(Runtime* runtime, T* member)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (CanUpdateMemberAccessFlags(member) && runtime->ShouldDedupeHiddenApiWarnings()) {
-    member->SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(
-        member->GetAccessFlags(), HiddenApiAccessFlags::kWhitelist));
-  }
-}
-
-template<typename T>
-Action GetMemberActionImpl(T* member,
-                           HiddenApiAccessFlags::ApiList api_list,
-                           Action action,
-                           AccessMethod access_method) {
-  DCHECK_NE(action, kAllow);
-
-  // Get the signature, we need it later.
-  MemberSignature member_signature(member);
-
-  Runtime* runtime = Runtime::Current();
-
-  // Check for an exemption first. Exempted APIs are treated as white list.
-  // We only do this if we're about to deny, or if the app is debuggable. This is because:
-  // - we only print a warning for light greylist violations for debuggable apps
-  // - for non-debuggable apps, there is no distinction between light grey & whitelisted APIs.
-  // - we want to avoid the overhead of checking for exemptions for light greylisted APIs whenever
-  //   possible.
-  const bool shouldWarn = kLogAllAccesses || runtime->IsJavaDebuggable();
-  if (shouldWarn || action == kDeny) {
-    if (member_signature.IsExempted(runtime->GetHiddenApiExemptions())) {
-      action = kAllow;
-      // Avoid re-examining the exemption list next time.
-      // Note this results in no warning for the member, which seems like what one would expect.
-      // Exemptions effectively adds new members to the whitelist.
-      MaybeWhitelistMember(runtime, member);
-      return kAllow;
-    }
-
-    if (access_method != kNone) {
-      // Print a log message with information about this class member access.
-      // We do this if we're about to block access, or the app is debuggable.
-      member_signature.WarnAboutAccess(access_method, api_list);
-    }
+void MemberSignature::NotifyHiddenApiListener(AccessMethod access_method) {
+  if (access_method != AccessMethod::kReflection && access_method != AccessMethod::kJNI) {
+    // We can only up-call into Java during reflection and JNI down-calls.
+    return;
   }
 
-  if (kIsTargetBuild && !kIsTargetLinux) {
-    uint32_t eventLogSampleRate = runtime->GetHiddenApiEventLogSampleRate();
-    // Assert that RAND_MAX is big enough, to ensure sampling below works as expected.
-    static_assert(RAND_MAX >= 0xffff, "RAND_MAX too small");
-    if (eventLogSampleRate != 0 &&
-        (static_cast<uint32_t>(std::rand()) & 0xffff) < eventLogSampleRate) {
-      member_signature.LogAccessToEventLog(access_method, action);
-    }
-  }
-
-  if (action == kDeny) {
-    // Block access
-    return action;
-  }
-
-  // Allow access to this member but print a warning.
-  DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast);
-
-  if (access_method != kNone) {
-    // Depending on a runtime flag, we might move the member into whitelist and
-    // skip the warning the next time the member is accessed.
-    MaybeWhitelistMember(runtime, member);
-
-    // If this action requires a UI warning, set the appropriate flag.
-    if (shouldWarn &&
-        (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag())) {
-      runtime->SetPendingHiddenApiWarning(true);
-    }
-  }
-
-  return action;
-}
-
-// Need to instantiate this.
-template Action GetMemberActionImpl<ArtField>(ArtField* member,
-                                              HiddenApiAccessFlags::ApiList api_list,
-                                              Action action,
-                                              AccessMethod access_method);
-template Action GetMemberActionImpl<ArtMethod>(ArtMethod* member,
-                                               HiddenApiAccessFlags::ApiList api_list,
-                                               Action action,
-                                               AccessMethod access_method);
-}  // namespace detail
-
-template<typename T>
-void NotifyHiddenApiListener(T* member) {
   Runtime* runtime = Runtime::Current();
   if (!runtime->IsAotCompiler()) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -298,9 +231,8 @@
     // If the consumer is non-null, we call back to it to let it know that we
     // have encountered an API that's in one of our lists.
     if (consumer_object != nullptr) {
-      detail::MemberSignature member_signature(member);
       std::ostringstream member_signature_str;
-      member_signature.Dump(member_signature_str);
+      Dump(member_signature_str);
 
       ScopedLocalRef<jobject> signature_str(
           soa.Env(),
@@ -314,8 +246,186 @@
   }
 }
 
-template void NotifyHiddenApiListener<ArtMethod>(ArtMethod* member);
-template void NotifyHiddenApiListener<ArtField>(ArtField* member);
+static ALWAYS_INLINE bool CanUpdateRuntimeFlags(ArtField*) {
+  return true;
+}
+
+static ALWAYS_INLINE bool CanUpdateRuntimeFlags(ArtMethod* method) {
+  return !method->IsIntrinsic();
+}
+
+template<typename T>
+static ALWAYS_INLINE void MaybeWhitelistMember(Runtime* runtime, T* member)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (CanUpdateRuntimeFlags(member) && runtime->ShouldDedupeHiddenApiWarnings()) {
+    member->SetAccessFlags(member->GetAccessFlags() | kAccPublicApi);
+  }
+}
+
+static ALWAYS_INLINE uint32_t GetMemberDexIndex(ArtField* field) {
+  return field->GetDexFieldIndex();
+}
+
+static ALWAYS_INLINE uint32_t GetMemberDexIndex(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Use the non-obsolete method to avoid DexFile mismatch between
+  // the method index and the declaring class.
+  return method->GetNonObsoleteMethod()->GetDexMethodIndex();
+}
+
+static void VisitMembers(const DexFile& dex_file,
+                         const dex::ClassDef& class_def,
+                         const std::function<void(const ClassAccessor::Field&)>& fn_visit) {
+  ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
+  accessor.VisitFields(fn_visit, fn_visit);
+}
+
+static void VisitMembers(const DexFile& dex_file,
+                         const dex::ClassDef& class_def,
+                         const std::function<void(const ClassAccessor::Method&)>& fn_visit) {
+  ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
+  accessor.VisitMethods(fn_visit, fn_visit);
+}
+
+template<typename T>
+uint32_t GetDexFlags(T* member) REQUIRES_SHARED(Locks::mutator_lock_) {
+  static_assert(std::is_same<T, ArtField>::value || std::is_same<T, ArtMethod>::value);
+  using AccessorType = typename std::conditional<std::is_same<T, ArtField>::value,
+      ClassAccessor::Field, ClassAccessor::Method>::type;
+
+  ObjPtr<mirror::Class> declaring_class = member->GetDeclaringClass();
+  DCHECK(!declaring_class.IsNull()) << "Attempting to access a runtime method";
+
+  ApiList flags;
+  DCHECK(!flags.IsValid());
+
+  // Check if the declaring class has ClassExt allocated. If it does, check if
+  // the pre-JVMTI redefine dex file has been set to determine if the declaring
+  // class has been JVMTI-redefined.
+  ObjPtr<mirror::ClassExt> ext(declaring_class->GetExtData());
+  const DexFile* original_dex = ext.IsNull() ? nullptr : ext->GetPreRedefineDexFile();
+  if (LIKELY(original_dex == nullptr)) {
+    // Class is not redefined. Find the class def, iterate over its members and
+    // find the entry corresponding to this `member`.
+    const dex::ClassDef* class_def = declaring_class->GetClassDef();
+    DCHECK(class_def != nullptr) << "Class def should always be set for initialized classes";
+
+    uint32_t member_index = GetMemberDexIndex(member);
+    auto fn_visit = [&](const AccessorType& dex_member) {
+      if (dex_member.GetIndex() == member_index) {
+        flags = ApiList(dex_member.GetHiddenapiFlags());
+      }
+    };
+    VisitMembers(declaring_class->GetDexFile(), *class_def, fn_visit);
+  } else {
+    // Class was redefined using JVMTI. We have a pointer to the original dex file
+    // and the class def index of this class in that dex file, but the field/method
+    // indices are lost. Iterate over all members of the class def and find the one
+    // corresponding to this `member` by name and type string comparison.
+    // This is obviously very slow, but it is only used when non-exempt code tries
+    // to access a hidden member of a JVMTI-redefined class.
+    uint16_t class_def_idx = ext->GetPreRedefineClassDefIndex();
+    DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
+    const dex::ClassDef& original_class_def = original_dex->GetClassDef(class_def_idx);
+    MemberSignature member_signature(member);
+    auto fn_visit = [&](const AccessorType& dex_member) {
+      MemberSignature cur_signature(dex_member);
+      if (member_signature.MemberNameAndTypeMatch(cur_signature)) {
+        DCHECK(member_signature.Equals(cur_signature));
+        flags = ApiList(dex_member.GetHiddenapiFlags());
+      }
+    };
+    VisitMembers(*original_dex, original_class_def, fn_visit);
+  }
+
+  CHECK(flags.IsValid()) << "Could not find hiddenapi flags for "
+      << Dumpable<MemberSignature>(MemberSignature(member));
+  return flags.GetDexFlags();
+}
+
+template<typename T>
+void MaybeReportCorePlatformApiViolation(T* member,
+                                         const AccessContext& caller_context,
+                                         AccessMethod access_method) {
+  if (access_method != AccessMethod::kNone) {
+    MemberSignature sig(member);
+    LOG(ERROR) << "CorePlatformApi violation: " << Dumpable<MemberSignature>(sig)
+               << " from " << caller_context << " using " << access_method;
+  }
+}
+
+template<typename T>
+bool ShouldDenyAccessToMemberImpl(T* member, ApiList api_list, AccessMethod access_method) {
+  DCHECK(member != nullptr);
+  Runtime* runtime = Runtime::Current();
+
+  EnforcementPolicy policy = runtime->GetHiddenApiEnforcementPolicy();
+  DCHECK(policy != EnforcementPolicy::kDisabled)
+      << "Should never enter this function when access checks are completely disabled";
+
+  const bool deny_access =
+      (policy == EnforcementPolicy::kEnabled) &&
+      IsSdkVersionSetAndMoreThan(runtime->GetTargetSdkVersion(),
+                                 api_list.GetMaxAllowedSdkVersion());
+
+  MemberSignature member_signature(member);
+
+  // Check for an exemption first. Exempted APIs are treated as white list.
+  if (member_signature.IsExempted(runtime->GetHiddenApiExemptions())) {
+    // Avoid re-examining the exemption list next time.
+    // Note this results in no warning for the member, which seems like what one would expect.
+      // Exemptions effectively add new members to the whitelist.
+    MaybeWhitelistMember(runtime, member);
+    return false;
+  }
+
+  if (access_method != AccessMethod::kNone) {
+    // Print a log message with information about this class member access.
+    // We do this if we're about to deny access, or the app is debuggable.
+    if (kLogAllAccesses || deny_access || runtime->IsJavaDebuggable()) {
+      member_signature.WarnAboutAccess(access_method, api_list);
+    }
+
+    // If there is a StrictMode listener, notify it about this violation.
+    member_signature.NotifyHiddenApiListener(access_method);
+
+    // If event log sampling is enabled, report this violation.
+    if (kIsTargetBuild && !kIsTargetLinux) {
+      uint32_t eventLogSampleRate = runtime->GetHiddenApiEventLogSampleRate();
+      // Assert that RAND_MAX is big enough, to ensure sampling below works as expected.
+      static_assert(RAND_MAX >= 0xffff, "RAND_MAX too small");
+      if (eventLogSampleRate != 0 &&
+          (static_cast<uint32_t>(std::rand()) & 0xffff) < eventLogSampleRate) {
+        member_signature.LogAccessToEventLog(access_method, deny_access);
+      }
+    }
+
+    // If this access was not denied, move the member into whitelist and skip
+    // the warning the next time the member is accessed.
+    if (!deny_access) {
+      MaybeWhitelistMember(runtime, member);
+    }
+  }
+
+  return deny_access;
+}
+
+// Need to instantiate these.
+template uint32_t GetDexFlags<ArtField>(ArtField* member);
+template uint32_t GetDexFlags<ArtMethod>(ArtMethod* member);
+template void MaybeReportCorePlatformApiViolation(ArtField* member,
+                                                  const AccessContext& caller_context,
+                                                  AccessMethod access_method);
+template void MaybeReportCorePlatformApiViolation(ArtMethod* member,
+                                                  const AccessContext& caller_context,
+                                                  AccessMethod access_method);
+template bool ShouldDenyAccessToMemberImpl<ArtField>(ArtField* member,
+                                                     ApiList api_list,
+                                                     AccessMethod access_method);
+template bool ShouldDenyAccessToMemberImpl<ArtMethod>(ArtMethod* member,
+                                                      ApiList api_list,
+                                                      AccessMethod access_method);
+}  // namespace detail
 
 }  // namespace hiddenapi
 }  // namespace art
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 580224e..0bdb5c8 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -17,10 +17,11 @@
 #ifndef ART_RUNTIME_HIDDEN_API_H_
 #define ART_RUNTIME_HIDDEN_API_H_
 
-#include "art_field-inl.h"
-#include "art_method-inl.h"
-#include "base/mutex.h"
-#include "dex/hidden_api_access_flags.h"
+#include "art_field.h"
+#include "art_method.h"
+#include "base/hiddenapi_flags.h"
+#include "base/locks.h"
+#include "intrinsics_enum.h"
 #include "mirror/class-inl.h"
 #include "reflection.h"
 #include "runtime.h"
@@ -32,11 +33,10 @@
 // This must be kept in sync with ApplicationInfo.ApiEnforcementPolicy in
 // frameworks/base/core/java/android/content/pm/ApplicationInfo.java
 enum class EnforcementPolicy {
-  kNoChecks             = 0,
+  kDisabled             = 0,
   kJustWarn             = 1,  // keep checks enabled, but allow everything (enables logging)
-  kDarkGreyAndBlackList = 2,  // ban dark grey & blacklist
-  kBlacklistOnly        = 3,  // ban blacklist violations only
-  kMax = kBlacklistOnly,
+  kEnabled              = 2,  // ban dark grey & blacklist
+  kMax = kEnabled,
 };
 
 inline EnforcementPolicy EnforcementPolicyFromInt(int api_policy_int) {
@@ -45,56 +45,102 @@
   return static_cast<EnforcementPolicy>(api_policy_int);
 }
 
-enum Action {
-  kAllow,
-  kAllowButWarn,
-  kAllowButWarnAndToast,
-  kDeny
+// Hidden API access method
+// This must be kept in sync with VMRuntime.HiddenApiUsageLogger.ACCESS_METHOD_*
+enum class AccessMethod {
+  kNone = 0,  // internal test that does not correspond to an actual access by app
+  kReflection = 1,
+  kJNI = 2,
+  kLinking = 3,
 };
 
-enum AccessMethod {
-  kNone,  // internal test that does not correspond to an actual access by app
-  kReflection,
-  kJNI,
-  kLinking,
+// Represents the API domain of a caller/callee.
+class AccessContext {
+ public:
+  // Initialize to either the fully-trusted or fully-untrusted domain.
+  explicit AccessContext(bool is_trusted)
+      : klass_(nullptr),
+        dex_file_(nullptr),
+        domain_(ComputeDomain(is_trusted)) {}
+
+  // Initialize from class loader and dex file (via dex cache).
+  AccessContext(ObjPtr<mirror::ClassLoader> class_loader, ObjPtr<mirror::DexCache> dex_cache)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : klass_(nullptr),
+        dex_file_(GetDexFileFromDexCache(dex_cache)),
+        domain_(ComputeDomain(class_loader, dex_file_)) {}
+
+  // Initialize from Class.
+  explicit AccessContext(ObjPtr<mirror::Class> klass)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : klass_(klass),
+        dex_file_(GetDexFileFromDexCache(klass->GetDexCache())),
+        domain_(ComputeDomain(klass, dex_file_)) {}
+
+  ObjPtr<mirror::Class> GetClass() const { return klass_; }
+  const DexFile* GetDexFile() const { return dex_file_; }
+  Domain GetDomain() const { return domain_; }
+
+  bool IsUntrustedDomain() const { return domain_ == Domain::kApplication; }
+
+  // Returns true if this domain is always allowed to access the domain of `callee`.
+  bool CanAlwaysAccess(const AccessContext& callee) const {
+    return IsDomainMoreTrustedThan(domain_, callee.domain_);
+  }
+
+ private:
+  static const DexFile* GetDexFileFromDexCache(ObjPtr<mirror::DexCache> dex_cache)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return dex_cache.IsNull() ? nullptr : dex_cache->GetDexFile();
+  }
+
+  static Domain ComputeDomain(bool is_trusted) {
+    return is_trusted ? Domain::kCorePlatform : Domain::kApplication;
+  }
+
+  static Domain ComputeDomain(ObjPtr<mirror::ClassLoader> class_loader, const DexFile* dex_file)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (dex_file == nullptr) {
+      return ComputeDomain(/* is_trusted= */ class_loader.IsNull());
+    }
+
+    Domain dex_domain = dex_file->GetHiddenapiDomain();
+    if (class_loader.IsNull() && dex_domain == Domain::kApplication) {
+      // LOG(WARNING) << "DexFile " << dex_file->GetLocation() << " is in boot classpath "
+      //              << "but is assigned untrusted domain";
+      dex_domain = Domain::kPlatform;
+    }
+    return dex_domain;
+  }
+
+  static Domain ComputeDomain(ObjPtr<mirror::Class> klass, const DexFile* dex_file)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Check other aspects of the context.
+    Domain domain = ComputeDomain(klass->GetClassLoader(), dex_file);
+
+    if (domain == Domain::kApplication &&
+        klass->ShouldSkipHiddenApiChecks() &&
+        Runtime::Current()->IsJavaDebuggable()) {
+      // Class is known, it is marked trusted and we are in debuggable mode.
+      domain = ComputeDomain(/* is_trusted= */ true);
+    }
+
+    return domain;
+  }
+
+  // Pointer to declaring class of the caller/callee (null if not provided).
+  // This is not safe across GC but we're only using this class for passing
+  // information about the caller to the access check logic and never retain
+  // the AccessContext instance beyond that.
+  const ObjPtr<mirror::Class> klass_;
+
+  // DexFile of the caller/callee (null if not provided).
+  const DexFile* const dex_file_;
+
+  // Computed domain of the caller/callee.
+  const Domain domain_;
 };
 
-// Do not change the values of items in this enum, as they are written to the
-// event log for offline analysis. Any changes will interfere with that analysis.
-enum AccessContextFlags {
-  // Accessed member is a field if this bit is set, else a method
-  kMemberIsField = 1 << 0,
-  // Indicates if access was denied to the member, instead of just printing a warning.
-  kAccessDenied  = 1 << 1,
-};
-
-inline Action GetActionFromAccessFlags(HiddenApiAccessFlags::ApiList api_list) {
-  if (api_list == HiddenApiAccessFlags::kWhitelist) {
-    return kAllow;
-  }
-
-  EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy();
-  if (policy == EnforcementPolicy::kNoChecks) {
-    // Exit early. Nothing to enforce.
-    return kAllow;
-  }
-
-  // if policy is "just warn", always warn. We returned above for whitelist APIs.
-  if (policy == EnforcementPolicy::kJustWarn) {
-    return kAllowButWarn;
-  }
-  DCHECK(policy >= EnforcementPolicy::kDarkGreyAndBlackList);
-  // The logic below relies on equality of values in the enums EnforcementPolicy and
-  // HiddenApiAccessFlags::ApiList, and their ordering. Assertions are in hidden_api.cc.
-  if (static_cast<int>(policy) > static_cast<int>(api_list)) {
-    return api_list == HiddenApiAccessFlags::kDarkGreylist
-        ? kAllowButWarnAndToast
-        : kAllowButWarn;
-  } else {
-    return kDeny;
-  }
-}
-
 class ScopedHiddenApiEnforcementPolicySetting {
  public:
   explicit ScopedHiddenApiEnforcementPolicySetting(EnforcementPolicy new_policy)
@@ -134,9 +180,14 @@
  public:
   explicit MemberSignature(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
   explicit MemberSignature(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+  explicit MemberSignature(const ClassAccessor::Field& field);
+  explicit MemberSignature(const ClassAccessor::Method& method);
 
   void Dump(std::ostream& os) const;
 
+  bool Equals(const MemberSignature& other);
+  bool MemberNameAndTypeMatch(const MemberSignature& other);
+
   // Performs prefix match on this member. Since the full member signature is
   // composed of several parts, we match each part in turn (rather than
   // building the entire thing in memory and performing a simple prefix match)
@@ -144,119 +195,251 @@
 
   bool IsExempted(const std::vector<std::string>& exemptions);
 
-  void WarnAboutAccess(AccessMethod access_method, HiddenApiAccessFlags::ApiList list);
+  void WarnAboutAccess(AccessMethod access_method, ApiList list);
 
-  void LogAccessToEventLog(AccessMethod access_method, Action action_taken);
+  void LogAccessToEventLog(AccessMethod access_method, bool access_denied);
+
+  // Calls back into managed code to notify VMRuntime.nonSdkApiUsageConsumer that
+  // |member| was accessed. This is usually called when an API is on the black,
+  // dark grey or light grey lists. Given that the callback can execute arbitrary
+  // code, a call to this method can result in thread suspension.
+  void NotifyHiddenApiListener(AccessMethod access_method);
 };
 
+// Locates hiddenapi flags for `field` in the corresponding dex file.
+// NB: This is an O(N) operation, linear with the number of members in the class def.
 template<typename T>
-Action GetMemberActionImpl(T* member,
-                           HiddenApiAccessFlags::ApiList api_list,
-                           Action action,
-                           AccessMethod access_method)
+uint32_t GetDexFlags(T* member) REQUIRES_SHARED(Locks::mutator_lock_);
+
+template<typename T>
+void MaybeReportCorePlatformApiViolation(T* member,
+                                         const AccessContext& caller_context,
+                                         AccessMethod access_method)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
-// Returns true if the caller is either loaded by the boot strap class loader or comes from
-// a dex file located in ${ANDROID_ROOT}/framework/.
-ALWAYS_INLINE
-inline bool IsCallerTrusted(ObjPtr<mirror::Class> caller,
-                            ObjPtr<mirror::ClassLoader> caller_class_loader,
-                            ObjPtr<mirror::DexCache> caller_dex_cache)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (caller_class_loader.IsNull()) {
-    // Boot class loader.
-    return true;
-  }
-
-  if (!caller_dex_cache.IsNull()) {
-    const DexFile* caller_dex_file = caller_dex_cache->GetDexFile();
-    if (caller_dex_file != nullptr && caller_dex_file->IsPlatformDexFile()) {
-      // Caller is in a platform dex file.
-      return true;
-    }
-  }
-
-  if (!caller.IsNull() &&
-      caller->ShouldSkipHiddenApiChecks() &&
-      Runtime::Current()->IsJavaDebuggable()) {
-    // We are in debuggable mode and this caller has been marked trusted.
-    return true;
-  }
-
-  return false;
-}
+template<typename T>
+bool ShouldDenyAccessToMemberImpl(T* member, ApiList api_list, AccessMethod access_method)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
 }  // namespace detail
 
-// Returns true if access to `member` should be denied to the caller of the
-// reflective query. The decision is based on whether the caller is trusted or
-// not. Because different users of this function determine this in a different
-// way, `fn_caller_is_trusted(self)` is called and should return true if the
-// caller is allowed to access the platform.
+// Returns access flags for the runtime representation of a class member (ArtField/ArtMember).
+ALWAYS_INLINE inline uint32_t CreateRuntimeFlags(const ClassAccessor::BaseItem& member) {
+  uint32_t runtime_flags = 0u;
+
+  ApiList api_list(member.GetHiddenapiFlags());
+  DCHECK(api_list.IsValid());
+
+  if (api_list.Contains(ApiList::Whitelist())) {
+    runtime_flags |= kAccPublicApi;
+  } else {
+    // Only add domain-specific flags for non-public API members.
+    // This simplifies hardcoded values for intrinsics.
+    if (api_list.Contains(ApiList::CorePlatformApi())) {
+      runtime_flags |= kAccCorePlatformApi;
+    }
+  }
+
+  DCHECK_EQ(runtime_flags & kAccHiddenapiBits, runtime_flags)
+      << "Runtime flags not in reserved access flags bits";
+  return runtime_flags;
+}
+
+// Extracts hiddenapi runtime flags from access flags of ArtField.
+ALWAYS_INLINE inline uint32_t GetRuntimeFlags(ArtField* field)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return field->GetAccessFlags() & kAccHiddenapiBits;
+}
+
+// Extracts hiddenapi runtime flags from access flags of ArtMethod.
+// Uses hardcoded values for intrinsics.
+ALWAYS_INLINE inline uint32_t GetRuntimeFlags(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (UNLIKELY(method->IsIntrinsic())) {
+    switch (static_cast<Intrinsics>(method->GetIntrinsic())) {
+      case Intrinsics::kSystemArrayCopyChar:
+      case Intrinsics::kStringGetCharsNoCheck:
+      case Intrinsics::kReferenceGetReferent:
+      case Intrinsics::kMemoryPeekByte:
+      case Intrinsics::kMemoryPokeByte:
+      case Intrinsics::kUnsafeCASInt:
+      case Intrinsics::kUnsafeCASLong:
+      case Intrinsics::kUnsafeCASObject:
+      case Intrinsics::kUnsafeGet:
+      case Intrinsics::kUnsafeGetAndAddInt:
+      case Intrinsics::kUnsafeGetAndAddLong:
+      case Intrinsics::kUnsafeGetAndSetInt:
+      case Intrinsics::kUnsafeGetAndSetLong:
+      case Intrinsics::kUnsafeGetAndSetObject:
+      case Intrinsics::kUnsafeGetLong:
+      case Intrinsics::kUnsafeGetLongVolatile:
+      case Intrinsics::kUnsafeGetObject:
+      case Intrinsics::kUnsafeGetObjectVolatile:
+      case Intrinsics::kUnsafeGetVolatile:
+      case Intrinsics::kUnsafePut:
+      case Intrinsics::kUnsafePutLong:
+      case Intrinsics::kUnsafePutLongOrdered:
+      case Intrinsics::kUnsafePutLongVolatile:
+      case Intrinsics::kUnsafePutObject:
+      case Intrinsics::kUnsafePutObjectOrdered:
+      case Intrinsics::kUnsafePutObjectVolatile:
+      case Intrinsics::kUnsafePutOrdered:
+      case Intrinsics::kUnsafePutVolatile:
+      case Intrinsics::kUnsafeLoadFence:
+      case Intrinsics::kUnsafeStoreFence:
+      case Intrinsics::kUnsafeFullFence:
+      case Intrinsics::kCRC32Update:
+      case Intrinsics::kCRC32UpdateBytes:
+      case Intrinsics::kCRC32UpdateByteBuffer:
+      case Intrinsics::kStringNewStringFromBytes:
+      case Intrinsics::kStringNewStringFromChars:
+      case Intrinsics::kStringNewStringFromString:
+      case Intrinsics::kMemoryPeekIntNative:
+      case Intrinsics::kMemoryPeekLongNative:
+      case Intrinsics::kMemoryPeekShortNative:
+      case Intrinsics::kMemoryPokeIntNative:
+      case Intrinsics::kMemoryPokeLongNative:
+      case Intrinsics::kMemoryPokeShortNative:
+      case Intrinsics::kVarHandleFullFence:
+      case Intrinsics::kVarHandleAcquireFence:
+      case Intrinsics::kVarHandleReleaseFence:
+      case Intrinsics::kVarHandleLoadLoadFence:
+      case Intrinsics::kVarHandleStoreStoreFence:
+      case Intrinsics::kVarHandleCompareAndExchange:
+      case Intrinsics::kVarHandleCompareAndExchangeAcquire:
+      case Intrinsics::kVarHandleCompareAndExchangeRelease:
+      case Intrinsics::kVarHandleCompareAndSet:
+      case Intrinsics::kVarHandleGet:
+      case Intrinsics::kVarHandleGetAcquire:
+      case Intrinsics::kVarHandleGetAndAdd:
+      case Intrinsics::kVarHandleGetAndAddAcquire:
+      case Intrinsics::kVarHandleGetAndAddRelease:
+      case Intrinsics::kVarHandleGetAndBitwiseAnd:
+      case Intrinsics::kVarHandleGetAndBitwiseAndAcquire:
+      case Intrinsics::kVarHandleGetAndBitwiseAndRelease:
+      case Intrinsics::kVarHandleGetAndBitwiseOr:
+      case Intrinsics::kVarHandleGetAndBitwiseOrAcquire:
+      case Intrinsics::kVarHandleGetAndBitwiseOrRelease:
+      case Intrinsics::kVarHandleGetAndBitwiseXor:
+      case Intrinsics::kVarHandleGetAndBitwiseXorAcquire:
+      case Intrinsics::kVarHandleGetAndBitwiseXorRelease:
+      case Intrinsics::kVarHandleGetAndSet:
+      case Intrinsics::kVarHandleGetAndSetAcquire:
+      case Intrinsics::kVarHandleGetAndSetRelease:
+      case Intrinsics::kVarHandleGetOpaque:
+      case Intrinsics::kVarHandleGetVolatile:
+      case Intrinsics::kVarHandleSet:
+      case Intrinsics::kVarHandleSetOpaque:
+      case Intrinsics::kVarHandleSetRelease:
+      case Intrinsics::kVarHandleSetVolatile:
+      case Intrinsics::kVarHandleWeakCompareAndSet:
+      case Intrinsics::kVarHandleWeakCompareAndSetAcquire:
+      case Intrinsics::kVarHandleWeakCompareAndSetPlain:
+      case Intrinsics::kVarHandleWeakCompareAndSetRelease:
+        return 0u;
+      default:
+        // Remaining intrinsics are public API. We DCHECK that in SetIntrinsic().
+        return kAccPublicApi;
+    }
+  } else {
+    return method->GetAccessFlags() & kAccHiddenapiBits;
+  }
+}
+
+// Returns true if access to `member` should be denied in the given context.
+// The decision is based on whether the caller is in a trusted context or not.
+// Because determining the access context can be expensive, a lambda function
+// "fn_get_access_context" is lazily invoked after other criteria have been
+// considered.
 // This function might print warnings into the log if the member is hidden.
 template<typename T>
-inline Action GetMemberAction(T* member,
-                              Thread* self,
-                              std::function<bool(Thread*)> fn_caller_is_trusted,
-                              AccessMethod access_method)
+inline bool ShouldDenyAccessToMember(T* member,
+                                     const std::function<AccessContext()>& fn_get_access_context,
+                                     AccessMethod access_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(member != nullptr);
+  const uint32_t runtime_flags = GetRuntimeFlags(member);
 
-  // Decode hidden API access flags.
-  // NB Multiple threads might try to access (and overwrite) these simultaneously,
-  // causing a race. We only do that if access has not been denied, so the race
-  // cannot change Java semantics. We should, however, decode the access flags
-  // once and use it throughout this function, otherwise we may get inconsistent
-  // results, e.g. print whitelist warnings (b/78327881).
-  HiddenApiAccessFlags::ApiList api_list = member->GetHiddenApiAccessFlags();
-
-  Action action = GetActionFromAccessFlags(member->GetHiddenApiAccessFlags());
-  if (action == kAllow) {
-    // Nothing to do.
-    return action;
+  // Exit early if member is public API. This flag is also set for non-boot class
+  // path fields/methods.
+  if ((runtime_flags & kAccPublicApi) != 0) {
+    return false;
   }
 
-  // Member is hidden. Invoke `fn_caller_in_platform` and find the origin of the access.
-  // This can be *very* expensive. Save it for last.
-  if (fn_caller_is_trusted(self)) {
-    // Caller is trusted. Exit.
-    return kAllow;
+  // Determine which domain the caller and callee belong to.
+  // This can be *very* expensive. This is why ShouldDenyAccessToMember
+  // should not be called on every individual access.
+  const AccessContext caller_context = fn_get_access_context();
+  const AccessContext callee_context(member->GetDeclaringClass());
+
+  // Non-boot classpath callers should have exited early.
+  DCHECK(!callee_context.IsUntrustedDomain());
+
+  // Check if the caller is always allowed to access members in the callee context.
+  if (caller_context.CanAlwaysAccess(callee_context)) {
+    return false;
   }
 
-  // Member is hidden and caller is not in the platform.
-  return detail::GetMemberActionImpl(member, api_list, action, access_method);
+  // Check if this is platform accessing core platform. We may warn if `member` is
+  // not part of core platform API.
+  switch (caller_context.GetDomain()) {
+    case Domain::kApplication: {
+      DCHECK(!callee_context.IsUntrustedDomain());
+
+      // Exit early if access checks are completely disabled.
+      EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy();
+      if (policy == EnforcementPolicy::kDisabled) {
+        return false;
+      }
+
+      // Decode hidden API access flags from the dex file.
+      // This is an O(N) operation scaling with the number of fields/methods
+      // in the class. Only do this on slow path and only do it once.
+      ApiList api_list(detail::GetDexFlags(member));
+      DCHECK(api_list.IsValid());
+
+      // Member is hidden and caller is not exempted. Enter slow path.
+      return detail::ShouldDenyAccessToMemberImpl(member, api_list, access_method);
+    }
+
+    case Domain::kPlatform: {
+      DCHECK(callee_context.GetDomain() == Domain::kCorePlatform);
+
+      // Member is part of core platform API. Accessing it is allowed.
+      if ((runtime_flags & kAccCorePlatformApi) != 0) {
+        return false;
+      }
+
+      // Allow access if access checks are disabled.
+      EnforcementPolicy policy = Runtime::Current()->GetCorePlatformApiEnforcementPolicy();
+      if (policy == EnforcementPolicy::kDisabled) {
+        return false;
+      }
+
+      // Access checks are not disabled, report the violation.
+      // detail::MaybeReportCorePlatformApiViolation(member, caller_context, access_method);
+
+      // Deny access if the policy is enabled.
+      return policy == EnforcementPolicy::kEnabled;
+    }
+
+    case Domain::kCorePlatform: {
+      LOG(FATAL) << "CorePlatform domain should be allowed to access all domains";
+      UNREACHABLE();
+    }
+  }
 }
 
-inline bool IsCallerTrusted(ObjPtr<mirror::Class> caller) REQUIRES_SHARED(Locks::mutator_lock_) {
-  return !caller.IsNull() &&
-      detail::IsCallerTrusted(caller, caller->GetClassLoader(), caller->GetDexCache());
-}
-
-// Returns true if access to `member` should be denied to a caller loaded with
-// `caller_class_loader`.
-// This function might print warnings into the log if the member is hidden.
+// Helper method for callers where access context can be determined beforehand.
+// Wraps AccessContext in a lambda and passes it to the real ShouldDenyAccessToMember.
 template<typename T>
-inline Action GetMemberAction(T* member,
-                              ObjPtr<mirror::ClassLoader> caller_class_loader,
-                              ObjPtr<mirror::DexCache> caller_dex_cache,
-                              AccessMethod access_method)
+inline bool ShouldDenyAccessToMember(T* member,
+                                     const AccessContext& access_context,
+                                     AccessMethod access_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  bool is_caller_trusted =
-      detail::IsCallerTrusted(/* caller */ nullptr, caller_class_loader, caller_dex_cache);
-  return GetMemberAction(member,
-                         /* thread */ nullptr,
-                         [is_caller_trusted] (Thread*) { return is_caller_trusted; },
-                         access_method);
+  return ShouldDenyAccessToMember(member, [&]() { return access_context; }, access_method);
 }
 
-// Calls back into managed code to notify VMRuntime.nonSdkApiUsageConsumer that
-// |member| was accessed. This is usually called when an API is on the black,
-// dark grey or light grey lists. Given that the callback can execute arbitrary
-// code, a call to this method can result in thread suspension.
-template<typename T> void NotifyHiddenApiListener(T* member)
-    REQUIRES_SHARED(Locks::mutator_lock_);
-
-
 }  // namespace hiddenapi
 }  // namespace art
 
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index 4c7efe6..1f83c05 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -16,6 +16,7 @@
 
 #include "hidden_api.h"
 
+#include "base/sdk_version.h"
 #include "common_runtime_test.h"
 #include "jni/jni_internal.h"
 #include "proxy_test.h"
@@ -23,7 +24,7 @@
 namespace art {
 
 using hiddenapi::detail::MemberSignature;
-using hiddenapi::GetActionFromAccessFlags;
+using hiddenapi::detail::ShouldDenyAccessToMemberImpl;
 
 class HiddenApiTest : public CommonRuntimeTest {
  protected:
@@ -68,6 +69,15 @@
     return art_field;
   }
 
+  bool ShouldDenyAccess(hiddenapi::ApiList list) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Choose parameters such that there are no side effects (AccessMethod::kNone)
+    // and that the member is not on the exemptions list (here we choose one which
+    // is not even in boot class path).
+    return ShouldDenyAccessToMemberImpl(/* member= */ class1_field1_,
+                                        list,
+                                        /* access_method= */ hiddenapi::AccessMethod::kNone);
+  }
+
  protected:
   Thread* self_;
   jobject jclass_loader_;
@@ -88,41 +98,41 @@
 };
 
 TEST_F(HiddenApiTest, CheckGetActionFromRuntimeFlags) {
-  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kWhitelist), hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kLightGreylist), hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kDarkGreylist), hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kBlacklist), hiddenapi::kAllow);
+  ScopedObjectAccess soa(self_);
 
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kJustWarn);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kWhitelist),
-            hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kLightGreylist),
-            hiddenapi::kAllowButWarn);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kDarkGreylist),
-            hiddenapi::kAllowButWarn);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kBlacklist),
-            hiddenapi::kAllowButWarn);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
 
-  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDarkGreyAndBlackList);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kWhitelist),
-            hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kLightGreylist),
-            hiddenapi::kAllowButWarn);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kDarkGreylist),
-            hiddenapi::kDeny);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kBlacklist),
-            hiddenapi::kDeny);
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
 
-  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kBlacklistOnly);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kWhitelist),
-            hiddenapi::kAllow);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kLightGreylist),
-            hiddenapi::kAllowButWarn);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kDarkGreylist),
-            hiddenapi::kAllowButWarnAndToast);
-  ASSERT_EQ(GetActionFromAccessFlags(HiddenApiAccessFlags::kBlacklist),
-            hiddenapi::kDeny);
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()) + 1);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
+
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxP().GetMaxAllowedSdkVersion()) + 1);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
 }
 
 TEST_F(HiddenApiTest, CheckMembersRead) {
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e8a47d1..34f645b 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -41,7 +41,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/array_ref.h"
-#include "base/globals.h"
+#include "base/file_utils.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/os.h"
@@ -65,6 +65,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "runtime_globals.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread_list.h"
 
@@ -148,11 +149,11 @@
   hprof_basic_long = 11,
 };
 
-typedef uint32_t HprofStringId;
-typedef uint32_t HprofClassObjectId;
-typedef uint32_t HprofClassSerialNumber;
-typedef uint32_t HprofStackTraceSerialNumber;
-typedef uint32_t HprofStackFrameId;
+using HprofStringId = uint32_t;
+using HprofClassObjectId = uint32_t;
+using HprofClassSerialNumber = uint32_t;
+using HprofStackTraceSerialNumber = uint32_t;
+using HprofStackFrameId = uint32_t;
 static constexpr HprofStackTraceSerialNumber kHprofNullStackTrace = 0;
 
 class EndianOutput {
@@ -761,13 +762,13 @@
     // Where exactly are we writing to?
     int out_fd;
     if (fd_ >= 0) {
-      out_fd = dup(fd_);
+      out_fd = DupCloexec(fd_);
       if (out_fd < 0) {
         ThrowRuntimeException("Couldn't dump heap; dup(%d) failed: %s", fd_, strerror(errno));
         return false;
       }
     } else {
-      out_fd = open(filename_.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0644);
+      out_fd = open(filename_.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
       if (out_fd < 0) {
         ThrowRuntimeException("Couldn't dump heap; open(\"%s\") failed: %s", filename_.c_str(),
                               strerror(errno));
@@ -1050,7 +1051,7 @@
     case HPROF_ROOT_REFERENCE_CLEANUP:
     case HPROF_UNREACHABLE:
       LOG(FATAL) << "obsolete tag " << static_cast<int>(heap_tag);
-      break;
+      UNREACHABLE();
   }
 
   ++objects_in_segment_;
@@ -1073,7 +1074,8 @@
   if (obj->IsClass() && obj->AsClass()->IsRetired()) {
     return;
   }
-  DCHECK(visited_objects_.insert(obj).second) << "Already visited " << obj;
+  DCHECK(visited_objects_.insert(obj).second)
+      << "Already visited " << obj << "(" << obj->PrettyTypeOf() << ")";
 
   ++total_objects_;
 
@@ -1251,7 +1253,7 @@
   __ AddU1(HPROF_CLASS_DUMP);
   __ AddClassId(LookupClassId(klass));
   __ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(klass));
-  __ AddClassId(LookupClassId(klass->GetSuperClass()));
+  __ AddClassId(LookupClassId(klass->GetSuperClass().Ptr()));
   __ AddObjectId(klass->GetClassLoader());
   __ AddObjectId(nullptr);    // no signer
   __ AddObjectId(nullptr);    // no prot domain
@@ -1542,7 +1544,7 @@
           reinterpret_cast<uintptr_t>(obj) + kObjectAlignment / 2);
       __ AddObjectId(fake_object_array);
     }
-    klass = klass->GetSuperClass();
+    klass = klass->GetSuperClass().Ptr();
   } while (klass != nullptr);
 
   // Patch the instance field length.
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index c527f6f..2082064 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -49,6 +49,38 @@
   return image_roots;
 }
 
+inline void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
+  const ImageSection& fields = GetFieldsSection();
+  for (size_t pos = 0u; pos < fields.Size(); ) {
+    auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
+    for (size_t i = 0u; i < array->size(); ++i) {
+      visitor->Visit(&array->At(i, sizeof(ArtField)));
+    }
+    pos += array->ComputeSize(array->size());
+  }
+}
+
+inline void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
+                                        uint8_t* base,
+                                        PointerSize pointer_size) const {
+  const size_t method_alignment = ArtMethod::Alignment(pointer_size);
+  const size_t method_size = ArtMethod::Size(pointer_size);
+  const ImageSection& methods = GetMethodsSection();
+  for (size_t pos = 0u; pos < methods.Size(); ) {
+    auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
+    for (size_t i = 0u; i < array->size(); ++i) {
+      visitor->Visit(&array->At(i, method_size, method_alignment));
+    }
+    pos += array->ComputeSize(array->size(), method_size, method_alignment);
+  }
+  const ImageSection& runtime_methods = GetRuntimeMethodsSection();
+  for (size_t pos = 0u; pos < runtime_methods.Size(); ) {
+    auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
+    visitor->Visit(method);
+    pos += method_size;
+  }
+}
+
 template <typename Visitor>
 inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
                                              uint8_t* base,
diff --git a/runtime/image.cc b/runtime/image.cc
index 028c515..b6bb0b1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -16,6 +16,9 @@
 
 #include "image.h"
 
+#include <lz4.h>
+#include <sstream>
+
 #include "base/bit_utils.h"
 #include "base/length_prefixed_array.h"
 #include "base/utils.h"
@@ -26,9 +29,11 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' };  // Image relocations.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '4', '\0' };  // CRC32UpdateBB intrinsic
 
-ImageHeader::ImageHeader(uint32_t image_begin,
+ImageHeader::ImageHeader(uint32_t image_reservation_size,
+                         uint32_t component_count,
+                         uint32_t image_begin,
                          uint32_t image_size,
                          ImageSection* sections,
                          uint32_t image_roots,
@@ -39,15 +44,12 @@
                          uint32_t oat_file_end,
                          uint32_t boot_image_begin,
                          uint32_t boot_image_size,
-                         uint32_t boot_oat_begin,
-                         uint32_t boot_oat_size,
-                         uint32_t pointer_size,
-                         bool compile_pic,
-                         bool is_pic,
-                         StorageMode storage_mode,
-                         size_t data_size)
-  : image_begin_(image_begin),
+                         uint32_t pointer_size)
+  : image_reservation_size_(image_reservation_size),
+    component_count_(component_count),
+    image_begin_(image_begin),
     image_size_(image_size),
+    image_checksum_(0u),
     oat_checksum_(oat_checksum),
     oat_file_begin_(oat_file_begin),
     oat_data_begin_(oat_data_begin),
@@ -55,15 +57,8 @@
     oat_file_end_(oat_file_end),
     boot_image_begin_(boot_image_begin),
     boot_image_size_(boot_image_size),
-    boot_oat_begin_(boot_oat_begin),
-    boot_oat_size_(boot_oat_size),
-    patch_delta_(0),
     image_roots_(image_roots),
-    pointer_size_(pointer_size),
-    compile_pic_(compile_pic),
-    is_pic_(is_pic),
-    storage_mode_(storage_mode),
-    data_size_(data_size) {
+    pointer_size_(pointer_size) {
   CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
   CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
   CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
@@ -77,23 +72,22 @@
   std::copy_n(sections, kSectionCount, sections_);
 }
 
-void ImageHeader::RelocateImage(off_t delta) {
+void ImageHeader::RelocateImage(int64_t delta) {
   CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";
   oat_file_begin_ += delta;
   oat_data_begin_ += delta;
   oat_data_end_ += delta;
   oat_file_end_ += delta;
-  patch_delta_ += delta;
   RelocateImageObjects(delta);
   RelocateImageMethods(delta);
 }
 
-void ImageHeader::RelocateImageObjects(off_t delta) {
+void ImageHeader::RelocateImageObjects(int64_t delta) {
   image_begin_ += delta;
   image_roots_ += delta;
 }
 
-void ImageHeader::RelocateImageMethods(off_t delta) {
+void ImageHeader::RelocateImageMethods(int64_t delta) {
   for (size_t i = 0; i < kImageMethodsCount; ++i) {
     image_methods_[i] += delta;
   }
@@ -106,6 +100,9 @@
   if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
     return false;
   }
+  if (!IsAligned<kPageSize>(image_reservation_size_)) {
+    return false;
+  }
   // Unsigned so wraparound is well defined.
   if (image_begin_ >= image_begin_ + image_size_) {
     return false;
@@ -119,9 +116,6 @@
   if (oat_file_begin_ >= oat_data_begin_) {
     return false;
   }
-  if (!IsAligned<kPageSize>(patch_delta_)) {
-    return false;
-  }
   return true;
 }
 
@@ -152,40 +146,38 @@
   }
 }
 
-void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
-  const ImageSection& fields = GetFieldsSection();
-  for (size_t pos = 0; pos < fields.Size(); ) {
-    auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
-    for (size_t i = 0; i < array->size(); ++i) {
-      visitor->Visit(&array->At(i, sizeof(ArtField)));
-    }
-    pos += array->ComputeSize(array->size());
-  }
-}
-
-void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
-                                        uint8_t* base,
-                                        PointerSize pointer_size) const {
-  const size_t method_alignment = ArtMethod::Alignment(pointer_size);
-  const size_t method_size = ArtMethod::Size(pointer_size);
-  const ImageSection& methods = GetMethodsSection();
-  for (size_t pos = 0; pos < methods.Size(); ) {
-    auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
-    for (size_t i = 0; i < array->size(); ++i) {
-      visitor->Visit(&array->At(i, method_size, method_alignment));
-    }
-    pos += array->ComputeSize(array->size(), method_size, method_alignment);
-  }
-  const ImageSection& runtime_methods = GetRuntimeMethodsSection();
-  for (size_t pos = 0; pos < runtime_methods.Size(); ) {
-    auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
-    visitor->Visit(method);
-    pos += method_size;
-  }
-}
-
 PointerSize ImageHeader::GetPointerSize() const {
   return ConvertToPointerSize(pointer_size_);
 }
 
+bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
+                                    const uint8_t* in_ptr,
+                                    std::string* error_msg) const {
+  switch (storage_mode_) {
+    case kStorageModeUncompressed: {
+      CHECK_EQ(image_size_, data_size_);
+      memcpy(out_ptr + image_offset_, in_ptr + data_offset_, data_size_);
+      break;
+    }
+    case kStorageModeLZ4:
+    case kStorageModeLZ4HC: {
+      // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
+      const size_t decompressed_size = LZ4_decompress_safe(
+          reinterpret_cast<const char*>(in_ptr) + data_offset_,
+          reinterpret_cast<char*>(out_ptr) + image_offset_,
+          data_size_,
+          image_size_);
+      CHECK_EQ(decompressed_size, image_size_);
+      break;
+    }
+    default: {
+      if (error_msg != nullptr) {
+        *error_msg = (std::ostringstream() << "Invalid image format " << storage_mode_).str();
+      }
+      return false;
+    }
+  }
+  return true;
+}
+
 }  // namespace art
diff --git a/runtime/image.h b/runtime/image.h
index af092ad..6578287 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -20,8 +20,9 @@
 #include <string.h>
 
 #include "base/enums.h"
-#include "base/globals.h"
+#include "base/iteration_range.h"
 #include "mirror/object.h"
+#include "runtime_globals.h"
 
 namespace art {
 
@@ -82,8 +83,10 @@
   uint32_t size_;
 };
 
-// header of image files written by ImageWriter, read and validated by Space.
-class PACKED(4) ImageHeader {
+// Header of image files written by ImageWriter, read and validated by Space.
+// Packed to object alignment since the first object follows directly after the header.
+static_assert(kObjectAlignment == 8, "Alignment check");
+class PACKED(8) ImageHeader {
  public:
   enum StorageMode : uint32_t {
     kStorageModeUncompressed,
@@ -93,27 +96,51 @@
   };
   static constexpr StorageMode kDefaultStorageMode = kStorageModeUncompressed;
 
-  ImageHeader()
-      : image_begin_(0U),
-        image_size_(0U),
-        oat_checksum_(0U),
-        oat_file_begin_(0U),
-        oat_data_begin_(0U),
-        oat_data_end_(0U),
-        oat_file_end_(0U),
-        boot_image_begin_(0U),
-        boot_image_size_(0U),
-        boot_oat_begin_(0U),
-        boot_oat_size_(0U),
-        patch_delta_(0),
-        image_roots_(0U),
-        pointer_size_(0U),
-        compile_pic_(0),
-        is_pic_(0),
-        storage_mode_(kDefaultStorageMode),
-        data_size_(0) {}
+  // Solid block of the image. May be compressed or uncompressed.
+  class PACKED(4) Block final {
+   public:
+    Block(StorageMode storage_mode,
+          uint32_t data_offset,
+          uint32_t data_size,
+          uint32_t image_offset,
+          uint32_t image_size)
+        : storage_mode_(storage_mode),
+          data_offset_(data_offset),
+          data_size_(data_size),
+          image_offset_(image_offset),
+          image_size_(image_size) {}
 
-  ImageHeader(uint32_t image_begin,
+    bool Decompress(uint8_t* out_ptr, const uint8_t* in_ptr, std::string* error_msg) const;
+
+    StorageMode GetStorageMode() const {
+      return storage_mode_;
+    }
+
+    uint32_t GetDataSize() const {
+      return data_size_;
+    }
+
+    uint32_t GetImageSize() const {
+      return image_size_;
+    }
+
+   private:
+    // Storage method for the image, the image may be compressed.
+    StorageMode storage_mode_ = kDefaultStorageMode;
+
+    // Compressed offset and size.
+    uint32_t data_offset_ = 0u;
+    uint32_t data_size_ = 0u;
+
+    // Image offset and size (decompressed or mapped location).
+    uint32_t image_offset_ = 0u;
+    uint32_t image_size_ = 0u;
+  };
+
+  ImageHeader() {}
+  ImageHeader(uint32_t image_reservation_size,
+              uint32_t component_count,
+              uint32_t image_begin,
               uint32_t image_size,
               ImageSection* sections,
               uint32_t image_roots,
@@ -124,23 +151,33 @@
               uint32_t oat_file_end,
               uint32_t boot_image_begin,
               uint32_t boot_image_size,
-              uint32_t boot_oat_begin,
-              uint32_t boot_oat_size,
-              uint32_t pointer_size,
-              bool compile_pic,
-              bool is_pic,
-              StorageMode storage_mode,
-              size_t data_size);
+              uint32_t pointer_size);
 
   bool IsValid() const;
   const char* GetMagic() const;
 
+  uint32_t GetImageReservationSize() const {
+    return image_reservation_size_;
+  }
+
+  uint32_t GetComponentCount() const {
+    return component_count_;
+  }
+
   uint8_t* GetImageBegin() const {
     return reinterpret_cast<uint8_t*>(image_begin_);
   }
 
   size_t GetImageSize() const {
-    return static_cast<uint32_t>(image_size_);
+    return image_size_;
+  }
+
+  uint32_t GetImageChecksum() const {
+    return image_checksum_;
+  }
+
+  void SetImageChecksum(uint32_t image_checksum) {
+    image_checksum_ = image_checksum;
   }
 
   uint32_t GetOatChecksum() const {
@@ -175,14 +212,6 @@
     return pointer_size_;
   }
 
-  off_t GetPatchDelta() const {
-    return patch_delta_;
-  }
-
-  void SetPatchDelta(off_t patch_delta) {
-    patch_delta_ = patch_delta;
-  }
-
   static std::string GetOatLocationFromImageLocation(const std::string& image) {
     return GetLocationFromImageLocation(image, "oat");
   }
@@ -219,6 +248,16 @@
     kBootImageLiveObjects = kSpecialRoots,  // Array of boot image objects that must be kept live.
   };
 
+  /*
+   * This describes the number and ordering of sections inside of Boot
+   * and App Images.  It is very important that changes to this struct
+   * are reflected in the compiler and loader.
+   *
+   * See:
+   *   - ImageWriter::ImageInfo::CreateImageSections()
+   *   - ImageWriter::Write()
+   *   - ImageWriter::AllocMemory()
+   */
   enum ImageSections {
     kSectionObjects,
     kSectionArtFields,
@@ -229,8 +268,9 @@
     kSectionDexCacheArrays,
     kSectionInternedStrings,
     kSectionClassTable,
+    kSectionStringReferenceOffsets,
+    kSectionMetadata,
     kSectionImageBitmap,
-    kSectionImageRelocations,
     kSectionCount,  // Number of elements in enum.
   };
 
@@ -242,6 +282,11 @@
 
   ArtMethod* GetImageMethod(ImageMethod index) const;
 
+  ImageSection& GetImageSection(ImageSections index) {
+    DCHECK_LT(static_cast<size_t>(index), kSectionCount);
+    return sections_[index];
+  }
+
   const ImageSection& GetImageSection(ImageSections index) const {
     DCHECK_LT(static_cast<size_t>(index), kSectionCount);
     return sections_[index];
@@ -283,12 +328,16 @@
     return GetImageSection(kSectionClassTable);
   }
 
-  const ImageSection& GetImageBitmapSection() const {
-    return GetImageSection(kSectionImageBitmap);
+  const ImageSection& GetImageStringReferenceOffsetsSection() const {
+    return GetImageSection(kSectionStringReferenceOffsets);
   }
 
-  const ImageSection& GetImageRelocationsSection() const {
-    return GetImageSection(kSectionImageRelocations);
+  const ImageSection& GetMetadataSection() const {
+    return GetImageSection(kSectionMetadata);
+  }
+
+  const ImageSection& GetImageBitmapSection() const {
+    return GetImageSection(kSectionImageBitmap);
   }
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -299,17 +348,9 @@
   ObjPtr<mirror::ObjectArray<mirror::Object>> GetImageRoots() const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RelocateImage(off_t delta);
-  void RelocateImageMethods(off_t delta);
-  void RelocateImageObjects(off_t delta);
-
-  bool CompilePic() const {
-    return compile_pic_ != 0;
-  }
-
-  bool IsPic() const {
-    return is_pic_ != 0;
-  }
+  void RelocateImage(int64_t delta);
+  void RelocateImageMethods(int64_t delta);
+  void RelocateImageObjects(int64_t delta);
 
   uint32_t GetBootImageBegin() const {
     return boot_image_begin_;
@@ -319,18 +360,6 @@
     return boot_image_size_;
   }
 
-  uint32_t GetBootOatBegin() const {
-    return boot_oat_begin_;
-  }
-
-  uint32_t GetBootOatSize() const {
-    return boot_oat_size_;
-  }
-
-  StorageMode GetStorageMode() const {
-    return storage_mode_;
-  }
-
   uint64_t GetDataSize() const {
     return data_size_;
   }
@@ -368,6 +397,24 @@
                                     uint8_t* base,
                                     PointerSize pointer_size) const;
 
  // Returns the range of image blocks, using this header's recorded image
  // begin address as the base for locating the block array.
  IterationRange<const Block*> GetBlocks() const {
    return GetBlocks(GetImageBegin());
  }
+
  // Returns the blocks as laid out relative to `image_begin` (the address at
  // which the image is mapped). The block array lives blocks_offset_ bytes
  // from the image start and holds blocks_count_ entries.
  IterationRange<const Block*> GetBlocks(const uint8_t* image_begin) const {
    const Block* begin = reinterpret_cast<const Block*>(image_begin + blocks_offset_);
    return {begin, begin + blocks_count_};
  }
+
  // Return true if the image has any compressed blocks.
  // NOTE(review): the code actually tests for the presence of any blocks at
  // all; per the blocks_offset_/blocks_count_ field comment these are only
  // written for compressed images — confirm all writers preserve that.
  bool HasCompressedBlock() const {
    return blocks_count_ != 0u;
  }
+
  // Number of entries in the block array (0 for images stored without blocks).
  uint32_t GetBlockCount() const {
    return blocks_count_;
  }
+
  private:
   static const uint8_t kImageMagic[4];
   static const uint8_t kImageVersion[4];
@@ -386,54 +433,53 @@
   uint8_t magic_[4];
   uint8_t version_[4];
 
+  // The total memory reservation size for the image.
+  // For boot image or boot image extension, the primary image includes the reservation
+  // for all image files and oat files, secondary images have the reservation set to 0.
+  // App images have reservation equal to `image_size_` rounded up to page size because
+  // their oat files are mmapped independently.
+  uint32_t image_reservation_size_ = 0u;
+
+  // The number of components.
+  // For boot image or boot image extension, the primary image stores the total number
+  // of images, secondary images have this set to 0.
+  // App images have 1 component.
+  uint32_t component_count_ = 0u;
+
   // Required base address for mapping the image.
-  uint32_t image_begin_;
+  uint32_t image_begin_ = 0u;
 
   // Image size, not page aligned.
-  uint32_t image_size_;
+  uint32_t image_size_ = 0u;
+
+  // Image file checksum (calculated with the checksum field set to 0).
+  uint32_t image_checksum_ = 0u;
 
   // Checksum of the oat file we link to for load time sanity check.
-  uint32_t oat_checksum_;
+  uint32_t oat_checksum_ = 0u;
 
   // Start address for oat file. Will be before oat_data_begin_ for .so files.
-  uint32_t oat_file_begin_;
+  uint32_t oat_file_begin_ = 0u;
 
   // Required oat address expected by image Method::GetCode() pointers.
-  uint32_t oat_data_begin_;
+  uint32_t oat_data_begin_ = 0u;
 
   // End of oat data address range for this image file.
-  uint32_t oat_data_end_;
+  uint32_t oat_data_end_ = 0u;
 
   // End of oat file address range. will be after oat_data_end_ for
   // .so files. Used for positioning a following alloc spaces.
-  uint32_t oat_file_end_;
+  uint32_t oat_file_end_ = 0u;
 
   // Boot image begin and end (app image headers only).
-  uint32_t boot_image_begin_;
-  uint32_t boot_image_size_;
-
-  // Boot oat begin and end (app image headers only).
-  uint32_t boot_oat_begin_;
-  uint32_t boot_oat_size_;
-
-  // TODO: We should probably insert a boot image checksum for app images.
-
-  // The total delta that this image has been patched.
-  int32_t patch_delta_;
+  uint32_t boot_image_begin_ = 0u;
+  uint32_t boot_image_size_ = 0u;  // Includes heap (*.art) and code (.oat).
 
   // Absolute address of an Object[] of objects needed to reinitialize from an image.
-  uint32_t image_roots_;
+  uint32_t image_roots_ = 0u;
 
   // Pointer size, this affects the size of the ArtMethods.
-  uint32_t pointer_size_;
-
-  // Boolean (0 or 1) to denote if the image was compiled with --compile-pic option
-  const uint32_t compile_pic_;
-
-  // Boolean (0 or 1) to denote if the image can be mapped at a random address, this only refers to
-  // the .art file. Currently, app oat files do not depend on their app image. There are no pointers
-  // from the app oat code to the app image.
-  const uint32_t is_pic_;
+  uint32_t pointer_size_ = 0u;
 
   // Image section sizes/offsets correspond to the uncompressed form.
   ImageSection sections_[kSectionCount];
@@ -441,16 +487,91 @@
   // Image methods, may be inside of the boot image for app images.
   uint64_t image_methods_[kImageMethodsCount];
 
-  // Storage method for the image, the image may be compressed.
-  StorageMode storage_mode_;
-
   // Data size for the image data excluding the bitmap and the header. For compressed images, this
   // is the compressed size in the file.
-  uint32_t data_size_;
+  uint32_t data_size_ = 0u;
+
+  // Image blocks, only used for compressed images.
+  uint32_t blocks_offset_ = 0u;
+  uint32_t blocks_count_ = 0u;
 
   friend class linker::ImageWriter;
 };
 
/*
 * This type holds the information necessary to fix up AppImage string
 * references.
 *
 * The first element of the pair is an offset into the image space.  If the
 * offset is tagged (testable using HasDexCacheStringNativeRefTag) it
 * indicates the location of a DexCache object that has one or more native
 * references to managed strings that need to be fixed up.  In this case the
 * second element has no meaningful value.
 *
 * If the first element isn't tagged then it indicates the location of a
 * managed object with a field that needs fixing up.  In this case the second
 * element of the pair is an object-relative offset to the field in question.
 */
using AppImageReferenceOffsetInfo = std::pair<uint32_t, uint32_t>;
+
/*
 * Returns `val` with its lowest bit set.  AppImage logic uses this tag to
 * mark an offset as pointing at a DexCache native reference array rather
 * than at a managed object.
 */
template<typename T>
T SetDexCacheStringNativeRefTag(T val) {
  static_assert(std::is_integral<T>::value, "Expected integral type.");

  // Only bit 0 is affected; the cast makes the narrowing back to T explicit.
  return static_cast<T>(val | 1u);
}
+
/*
 * Returns `val` with its second-lowest bit set.  AppImage logic uses this
 * tag to mark an offset as pointing at a pre-resolved-string native
 * reference array rather than at a managed object.
 */
template<typename T>
T SetDexCachePreResolvedStringNativeRefTag(T val) {
  static_assert(std::is_integral<T>::value, "Expected integral type.");

  // Only bit 1 is affected; the cast makes the narrowing back to T explicit.
  return static_cast<T>(val | 2u);
}
+
/*
 * Tests the lowest bit of `val`.  AppImage logic uses this to tell pointers
 * to DexCache native reference arrays apart from pointers to managed
 * objects.
 */
template<typename T>
bool HasDexCacheStringNativeRefTag(T val) {
  static_assert(std::is_integral<T>::value, "Expected integral type.");

  // Nonzero bit 0 means the offset carries the DexCache tag.
  return static_cast<bool>(val & 1u);
}
+
/*
 * Tests the second-lowest bit of `val`.  AppImage logic uses this to tell
 * pointers to pre-resolved-string native reference arrays apart from
 * pointers to managed objects.
 */
template<typename T>
bool HasDexCachePreResolvedStringNativeRefTag(T val) {
  static_assert(std::is_integral<T>::value, "Expected integral type.");

  // Nonzero bit 1 means the offset carries the pre-resolved-string tag.
  return static_cast<bool>(val & 2u);
}
+
/*
 * Returns `val` with both AppImage tag bits (bits 0 and 1) cleared.  Used by
 * AppImage logic to recover the untagged offset regardless of which tag (if
 * any) was applied.
 */
template<typename T>
T ClearDexCacheNativeRefTags(T val) {
  static_assert(std::is_integral<T>::value, "Expected integral type.");

  // Build the mask in T: the int-width literal `~3u` zero-extends when T is
  // wider than unsigned int (e.g. uint64_t) and would wipe the high bits.
  return static_cast<T>(val & ~static_cast<T>(3));
}
+
 std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy);
 std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& policy);
 std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageSections& section);
diff --git a/runtime/imt_conflict_table.h b/runtime/imt_conflict_table.h
index 3586864..02b3be4 100644
--- a/runtime/imt_conflict_table.h
+++ b/runtime/imt_conflict_table.h
@@ -187,17 +187,17 @@
 
   ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
     if (pointer_size == PointerSize::k64) {
-      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+      return reinterpret_cast64<ArtMethod*>(data64_[index]);
     } else {
-      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+      return reinterpret_cast32<ArtMethod*>(data32_[index]);
     }
   }
 
   void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
     if (pointer_size == PointerSize::k64) {
-      data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+      data64_[index] = reinterpret_cast64<uint64_t>(method);
     } else {
-      data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+      data32_[index] = reinterpret_cast32<uint32_t>(method);
     }
   }
 
diff --git a/runtime/imtable-inl.h b/runtime/imtable-inl.h
index 93346f6..21e3eb1 100644
--- a/runtime/imtable-inl.h
+++ b/runtime/imtable-inl.h
@@ -46,7 +46,7 @@
     }
 
     const DexFile* dex_file = method->GetDexFile();
-    const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+    const dex::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
 
     // Class descriptor for the class component.
     *class_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodDeclaringClassDescriptor(method_id));
@@ -54,7 +54,7 @@
     // Method name for the method component.
     *name_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodName(method_id));
 
-    const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+    const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
 
     // Read the proto for the signature component.
     uint32_t tmp = ComputeModifiedUtf8Hash(
@@ -63,10 +63,10 @@
     // Mix in the argument types.
     // Note: we could consider just using the shorty. This would be faster, at the price of
     //       potential collisions.
-    const DexFile::TypeList* param_types = dex_file->GetProtoParameters(proto_id);
+    const dex::TypeList* param_types = dex_file->GetProtoParameters(proto_id);
     if (param_types != nullptr) {
       for (size_t i = 0; i != param_types->Size(); ++i) {
-        const DexFile::TypeItem& type = param_types->GetTypeItem(i);
+        const dex::TypeItem& type = param_types->GetTypeItem(i);
         tmp = 31 * tmp + ComputeModifiedUtf8Hash(
             dex_file->GetTypeDescriptor(dex_file->GetTypeId(type.type_idx_)));
       }
diff --git a/runtime/imtable.h b/runtime/imtable.h
index aa0a504..48a8643 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -21,9 +21,10 @@
 #error IMT_SIZE not defined
 #endif
 
+#include "base/casts.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
@@ -46,10 +47,10 @@
     uint8_t* ptr = AddressOfElement(index, pointer_size);
     if (pointer_size == PointerSize::k32) {
       uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
-      return reinterpret_cast<ArtMethod*>(value);
+      return reinterpret_cast32<ArtMethod*>(value);
     } else {
       uint64_t value = *reinterpret_cast<uint64_t*>(ptr);
-      return reinterpret_cast<ArtMethod*>(value);
+      return reinterpret_cast64<ArtMethod*>(value);
     }
   }
 
@@ -57,11 +58,9 @@
     DCHECK_LT(index, kSize);
     uint8_t* ptr = AddressOfElement(index, pointer_size);
     if (pointer_size == PointerSize::k32) {
-      uintptr_t value = reinterpret_cast<uintptr_t>(method);
-      DCHECK_EQ(static_cast<uint32_t>(value), value);  // Check that we dont lose any non 0 bits.
-      *reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
+      *reinterpret_cast<uint32_t*>(ptr) = reinterpret_cast32<uint32_t>(method);
     } else {
-      *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast<uint64_t>(method);
+      *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast64<uint64_t>(method);
     }
   }
 
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8ab4a9b..361dccb 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -21,6 +21,7 @@
 #include "base/utils.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "mirror/object-inl.h"
 #include "nth_caller_visitor.h"
 #include "reference_table.h"
 #include "runtime.h"
@@ -79,10 +80,9 @@
 
   const size_t table_bytes = max_count * sizeof(IrtEntry);
   table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /*low_4gb=*/ false,
                                         error_msg);
   if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
@@ -222,10 +222,9 @@
 
   const size_t table_bytes = new_size * sizeof(IrtEntry);
   MemMap new_map = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* is_low_4gb */ false,
+                                        /*low_4gb=*/ false,
                                         error_msg);
   if (!new_map.IsValid()) {
     return false;
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 8c63c00..eb07035 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -26,9 +26,9 @@
 #include <android-base/logging.h>
 
 #include "base/bit_utils.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 #include "offsets.h"
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 141feb4..c5ae4c6 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -20,6 +20,7 @@
 
 #include "class_linker-inl.h"
 #include "common_runtime_test.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/object-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index b42433c..36f7b3d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -18,6 +18,8 @@
 
 #include <sstream>
 
+#include <android-base/logging.h>
+
 #include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -43,6 +45,7 @@
 #include "mirror/object_array-inl.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
+#include "runtime-inl.h"
 #include "thread.h"
 #include "thread_list.h"
 
@@ -160,9 +163,9 @@
       have_exception_thrown_listeners_(false),
       have_watched_frame_pop_listeners_(false),
       have_branch_listeners_(false),
-      have_invoke_virtual_or_interface_listeners_(false),
       have_exception_handled_listeners_(false),
-      deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
+      deoptimized_methods_lock_(new ReaderWriterMutex("deoptimized methods lock",
+                                                      kGenericBottomLock)),
       deoptimization_enabled_(false),
       interpreter_handler_table_(kMainHandlerTable),
       quick_alloc_entry_points_instrumentation_counter_(0),
@@ -321,8 +324,9 @@
 
         const InstrumentationStackFrame& frame =
             (*instrumentation_stack_)[instrumentation_stack_depth_];
-        CHECK_EQ(m, frame.method_) << "Expected " << ArtMethod::PrettyMethod(m)
-                                   << ", Found " << ArtMethod::PrettyMethod(frame.method_);
+        CHECK_EQ(m->GetNonObsoleteMethod(), frame.method_->GetNonObsoleteMethod())
+            << "Expected " << ArtMethod::PrettyMethod(m)
+            << ", Found " << ArtMethod::PrettyMethod(frame.method_);
         return_pc = frame.return_pc_;
         if (kVerboseInstrumentation) {
           LOG(INFO) << "Ignoring already instrumented " << frame.Dump();
@@ -468,7 +472,9 @@
           if (instrumentation_frame.interpreter_entry_) {
             CHECK(m == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
           } else {
-            CHECK(m == instrumentation_frame.method_) << ArtMethod::PrettyMethod(m);
+            CHECK_EQ(m->GetNonObsoleteMethod(),
+                     instrumentation_frame.method_->GetNonObsoleteMethod())
+                << ArtMethod::PrettyMethod(m);
           }
           SetReturnPc(instrumentation_frame.return_pc_);
           if (instrumentation_->ShouldNotifyMethodEnterExitEvents() &&
@@ -537,7 +543,7 @@
   } else {
     list.push_back(listener);
   }
-  *has_listener = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
 }
 
 void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
@@ -562,11 +568,6 @@
                            branch_listeners_,
                            listener,
                            &have_branch_listeners_);
-  PotentiallyAddListenerTo(kInvokeVirtualOrInterface,
-                           events,
-                           invoke_virtual_or_interface_listeners_,
-                           listener,
-                           &have_invoke_virtual_or_interface_listeners_);
   PotentiallyAddListenerTo(kDexPcMoved,
                            events,
                            dex_pc_listeners_,
@@ -620,11 +621,11 @@
   // Check if the list contains any non-null listener, and update 'has_listener'.
   for (InstrumentationListener* l : list) {
     if (l != nullptr) {
-      *has_listener = true;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
       return;
     }
   }
-  *has_listener = false;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = false; });
 }
 
 void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
@@ -649,11 +650,6 @@
                                 branch_listeners_,
                                 listener,
                                 &have_branch_listeners_);
-  PotentiallyRemoveListenerFrom(kInvokeVirtualOrInterface,
-                                events,
-                                invoke_virtual_or_interface_listeners_,
-                                listener,
-                                &have_invoke_virtual_or_interface_listeners_);
   PotentiallyRemoveListenerFrom(kDexPcMoved,
                                 events,
                                 dex_pc_listeners_,
@@ -751,7 +747,7 @@
     // Restore stack only if there is no method currently deoptimized.
     bool empty;
     {
-      ReaderMutexLock mu(self, deoptimized_methods_lock_);
+      ReaderMutexLock mu(self, *GetDeoptimizedMethodsLock());
       empty = IsDeoptimizedMethodsEmpty();  // Avoid lock violation.
     }
     if (empty) {
@@ -852,7 +848,7 @@
         new_quick_code = GetQuickInstrumentationEntryPoint();
         if (!method->IsNative() && Runtime::Current()->GetJit() != nullptr) {
           // Native methods use trampoline entrypoints during interpreter tracing.
-          DCHECK(!Runtime::Current()->GetJit()->GetCodeCache()->GetGarbageCollectCode());
+          DCHECK(!Runtime::Current()->GetJit()->GetCodeCache()->GetGarbageCollectCodeUnsafe());
           ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
           // Tracing will look at the saved entry point in the profiling info to know the actual
           // entrypoint, so we store it here.
@@ -939,7 +935,7 @@
 
   Thread* self = Thread::Current();
   {
-    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
     bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
     CHECK(has_not_been_deoptimized) << "Method " << ArtMethod::PrettyMethod(method)
         << " is already deoptimized";
@@ -963,7 +959,7 @@
   Thread* self = Thread::Current();
   bool empty;
   {
-    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
     bool found_and_erased = RemoveDeoptimizedMethod(method);
     CHECK(found_and_erased) << "Method " << ArtMethod::PrettyMethod(method)
         << " is not deoptimized";
@@ -995,12 +991,12 @@
 
 bool Instrumentation::IsDeoptimized(ArtMethod* method) {
   DCHECK(method != nullptr);
-  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+  ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
   return IsDeoptimizedMethod(method);
 }
 
 void Instrumentation::EnableDeoptimization() {
-  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+  ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
   CHECK(IsDeoptimizedMethodsEmpty());
   CHECK_EQ(deoptimization_enabled_, false);
   deoptimization_enabled_ = true;
@@ -1017,7 +1013,7 @@
   while (true) {
     ArtMethod* method;
     {
-      ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+      ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
       if (IsDeoptimizedMethodsEmpty()) {
         break;
       }
@@ -1054,14 +1050,6 @@
     level = InstrumentationLevel::kInstrumentWithInterpreter;
   } else {
     level = InstrumentationLevel::kInstrumentWithInstrumentationStubs;
-    if (Runtime::Current()->GetJit() != nullptr) {
-      // TODO b/110263880 It would be better if we didn't need to do this.
-      // Since we need to hold the method entrypoint across a suspend to ensure instrumentation
-      // hooks are called correctly we have to disable jit-gc to ensure that the entrypoint doesn't
-      // go away. Furthermore we need to leave this off permanently since one could get the same
-      // effect by causing this to be toggled on and off.
-      Runtime::Current()->GetJit()->GetCodeCache()->SetGarbageCollectCode(false);
-    }
   }
   ConfigureStubs(key, level);
 }
@@ -1213,21 +1201,6 @@
   }
 }
 
-void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
-                                                   ObjPtr<mirror::Object> this_object,
-                                                   ArtMethod* caller,
-                                                   uint32_t dex_pc,
-                                                   ArtMethod* callee) const {
-  Thread* self = Thread::Current();
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Object> thiz(hs.NewHandle(this_object));
-  for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
-    if (listener != nullptr) {
-      listener->InvokeVirtualOrInterface(thread, thiz, caller, dex_pc, callee);
-    }
-  }
-}
-
 void Instrumentation::WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const {
   for (InstrumentationListener* listener : watched_frame_pop_listeners_) {
     if (listener != nullptr) {
@@ -1380,47 +1353,66 @@
 }
 
 // Try to get the shorty of a runtime method if it's an invocation stub.
-struct RuntimeMethodShortyVisitor : public StackVisitor {
-  explicit RuntimeMethodShortyVisitor(Thread* thread)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        shorty('V') {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    if (m != nullptr && !m->IsRuntimeMethod()) {
-      // The first Java method.
-      if (m->IsNative()) {
-        // Use JNI method's shorty for the jni stub.
-        shorty = m->GetShorty()[0];
-        return false;
-      }
-      if (m->IsProxyMethod()) {
-        // Proxy method just invokes its proxied method via
-        // art_quick_proxy_invoke_handler.
-        shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
-        return false;
-      }
-      const Instruction& instr = m->DexInstructions().InstructionAt(GetDexPc());
-      if (instr.IsInvoke()) {
-        const DexFile* dex_file = m->GetDexFile();
-        if (interpreter::IsStringInit(dex_file, instr.VRegB())) {
-          // Invoking string init constructor is turned into invoking
-          // StringFactory.newStringFromChars() which returns a string.
-          shorty = 'L';
-          return false;
+static char GetRuntimeMethodShorty(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
+  char shorty = 'V';
+  StackVisitor::WalkStack(
+      [&shorty](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        if (m == nullptr || m->IsRuntimeMethod()) {
+          return true;
         }
-        // A regular invoke, use callee's shorty.
-        uint32_t method_idx = instr.VRegB();
-        shorty = dex_file->GetMethodShorty(method_idx)[0];
-      }
-      // Stop stack walking since we've seen a Java frame.
-      return false;
-    }
-    return true;
-  }
+        // The first Java method.
+        if (m->IsNative()) {
+          // Use JNI method's shorty for the jni stub.
+          shorty = m->GetShorty()[0];
+        } else if (m->IsProxyMethod()) {
+          // Proxy method just invokes its proxied method via
+          // art_quick_proxy_invoke_handler.
+          shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
+        } else {
+          const Instruction& instr = m->DexInstructions().InstructionAt(stack_visitor->GetDexPc());
+          if (instr.IsInvoke()) {
+            auto get_method_index_fn = [](ArtMethod* caller,
+                                          const Instruction& inst,
+                                          uint32_t dex_pc)
+                REQUIRES_SHARED(Locks::mutator_lock_) {
+              switch (inst.Opcode()) {
+                case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+                case Instruction::INVOKE_VIRTUAL_QUICK: {
+                  uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
+                  CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+                  return method_idx;
+                }
+                default: {
+                  return static_cast<uint16_t>(inst.VRegB());
+                }
+              }
+            };
 
-  char shorty;
-};
+            uint16_t method_index = get_method_index_fn(m, instr, stack_visitor->GetDexPc());
+            const DexFile* dex_file = m->GetDexFile();
+            if (interpreter::IsStringInit(dex_file, method_index)) {
+              // Invoking string init constructor is turned into invoking
+              // StringFactory.newStringFromChars() which returns a string.
+              shorty = 'L';
+            } else {
+              shorty = dex_file->GetMethodShorty(method_index)[0];
+            }
+
+          } else {
+            // It could be that a non-invoke opcode invokes a stub, which in turn
+            // invokes Java code. In such cases, we should never expect a return
+            // value from the stub.
+          }
+        }
+        // Stop stack walking since we've seen a Java frame.
+        return false;
+      },
+      thread,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return shorty;
+}
 
 TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
                                                             uintptr_t* return_pc,
@@ -1454,9 +1446,7 @@
       // for clinit, we need to pass return results to the caller.
       // We need the correct shorty to decide whether we need to pass the return
       // result for deoptimization below.
-      RuntimeMethodShortyVisitor visitor(self);
-      visitor.WalkStack();
-      return_shorty = visitor.shorty;
+      return_shorty = GetRuntimeMethodShorty(self);
     } else {
       // Some runtime methods such as allocations, unresolved field getters, etc.
       // have return value. We don't need to set return_value since MethodExitEvent()
@@ -1520,8 +1510,8 @@
     DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
     self->PushDeoptimizationContext(return_value,
                                     return_shorty == 'L' || return_shorty == '[',
-                                    nullptr /* no pending exception */,
-                                    false /* from_code */,
+                                    /* exception= */ nullptr,
+                                    /* from_code= */ false,
                                     deopt_method_type);
     return GetTwoWordSuccessValue(*return_pc,
                                   reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e5d8800..d4c3c29 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -19,12 +19,13 @@
 
 #include <stdint.h>
 #include <list>
+#include <memory>
 #include <unordered_set>
 
 #include "arch/instruction_set.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/safe_map.h"
 #include "gc_root.h"
 
@@ -39,6 +40,7 @@
 template <typename T> class Handle;
 template <typename T> class MutableHandle;
 union JValue;
+class SHARED_LOCKABLE ReaderWriterMutex;
 class ShadowFrame;
 class Thread;
 enum class DeoptimizationMethodType;
@@ -143,14 +145,6 @@
                       int32_t dex_pc_offset)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
-  // Call-back for when we get an invokevirtual or an invokeinterface.
-  virtual void InvokeVirtualOrInterface(Thread* thread,
-                                        Handle<mirror::Object> this_object,
-                                        ArtMethod* caller,
-                                        uint32_t dex_pc,
-                                        ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-
   // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack by
   // either return or exceptions. Normally instrumentation listeners should ensure that there are
   // shadow-frames by deoptimizing stacks.
@@ -193,7 +187,6 @@
     kFieldWritten = 0x20,
     kExceptionThrown = 0x40,
     kBranch = 0x80,
-    kInvokeVirtualOrInterface = 0x100,
     kWatchedFramePop = 0x200,
     kExceptionHandled = 0x400,
   };
@@ -220,11 +213,11 @@
   // Deoptimization.
   void EnableDeoptimization()
       REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
   // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
   void DisableDeoptimization(const char* key)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
 
   bool AreAllMethodsDeoptimized() const {
     return interpreter_stubs_installed_;
@@ -240,7 +233,7 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Executes everything with compiled code (or interpreter if there is no code). May visit class
   // linker classes through ConfigureStubs.
@@ -248,23 +241,23 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
   // method (except a class initializer) set to the resolution trampoline will be deoptimized only
   // once its declaring class is initialized.
   void Deoptimize(ArtMethod* method)
-      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
 
   // Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method
   // (except a class initializer) set to the resolution trampoline will be updated only once its
   // declaring class is initialized.
   void Undeoptimize(ArtMethod* method)
-      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
 
   // Indicates whether the method has been deoptimized so it is executed with the interpreter.
   bool IsDeoptimized(ArtMethod* method)
-      REQUIRES(!deoptimized_methods_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
   void EnableMethodTracing(const char* key,
@@ -272,14 +265,14 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
   void DisableMethodTracing(const char* key)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   InterpreterHandlerTable GetInterpreterHandlerTable() const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -298,19 +291,19 @@
 
   // Update the code of a method respecting any installed stubs.
   void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a native method to a JITed stub.
   void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a method to the interpreter respecting any installed stubs from debugger.
   void UpdateMethodsCodeToInterpreterEntryPoint(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a method respecting any installed stubs from debugger.
   void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Return the code that we can execute for an invoke including from the JIT.
   const void* GetCodeForInvoke(ArtMethod* method) const
@@ -377,10 +370,6 @@
     return have_branch_listeners_;
   }
 
-  bool HasInvokeVirtualOrInterfaceListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return have_invoke_virtual_or_interface_listeners_;
-  }
-
   bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return have_watched_frame_pop_listeners_;
   }
@@ -393,15 +382,6 @@
     return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
         have_field_read_listeners_ || have_field_write_listeners_ ||
         have_exception_thrown_listeners_ || have_method_unwind_listeners_ ||
-        have_branch_listeners_ || have_invoke_virtual_or_interface_listeners_ ||
-        have_watched_frame_pop_listeners_ || have_exception_handled_listeners_;
-  }
-
-  // Any instrumentation *other* than what is needed for Jit profiling active?
-  bool NonJitProfilingActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return have_dex_pc_listeners_ || have_method_exit_listeners_ ||
-        have_field_read_listeners_ || have_field_write_listeners_ ||
-        have_exception_thrown_listeners_ || have_method_unwind_listeners_ ||
         have_branch_listeners_ || have_watched_frame_pop_listeners_ ||
         have_exception_handled_listeners_;
   }
@@ -470,17 +450,6 @@
     }
   }
 
-  void InvokeVirtualOrInterface(Thread* thread,
-                                mirror::Object* this_object,
-                                ArtMethod* caller,
-                                uint32_t dex_pc,
-                                ArtMethod* callee) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
-      InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
-    }
-  }
-
   // Inform listeners that a branch has been taken (only supported by the interpreter).
   void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -516,7 +485,7 @@
   // being returned from.
   TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
                                              uint64_t* gpr_result, uint64_t* fpr_result)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
   // instrumentation frame that's popped.
@@ -525,10 +494,10 @@
 
   // Call back for configure stubs.
   void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
 
   void InstallStubsForMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Install instrumentation exit stub on every method of the stack of the given thread.
   // This is used by the debugger to cause a deoptimization of the thread's stack after updating
@@ -561,7 +530,7 @@
   // becomes the highest instrumentation level required by a client.
   void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
-      REQUIRES(!deoptimized_methods_lock_,
+      REQUIRES(!GetDeoptimizedMethodsLock(),
                !Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_);
 
@@ -598,12 +567,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void InvokeVirtualOrInterfaceImpl(Thread* thread,
-                                    ObjPtr<mirror::Object> this_object,
-                                    ArtMethod* caller,
-                                    uint32_t dex_pc,
-                                    ArtMethod* callee) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
   void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FieldReadEventImpl(Thread* thread,
@@ -622,18 +585,21 @@
 
   // Read barrier-aware utility functions for accessing deoptimized_methods_
   bool AddDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
   bool IsDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   bool RemoveDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
   ArtMethod* BeginDeoptimizedMethod()
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   bool IsDeoptimizedMethodsEmpty() const
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
+  ReaderWriterMutex* GetDeoptimizedMethodsLock() const {
+    return deoptimized_methods_lock_.get();
+  }
 
   // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
   bool instrumentation_stubs_installed_;
@@ -683,9 +649,6 @@
   // Do we have any branch listeners? Short-cut to avoid taking the instrumentation_lock_.
   bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
 
-  // Do we have any invoke listeners? Short-cut to avoid taking the instrumentation_lock_.
-  bool have_invoke_virtual_or_interface_listeners_ GUARDED_BY(Locks::mutator_lock_);
-
   // Do we have any exception handled listeners? Short-cut to avoid taking the
   // instrumentation_lock_.
   bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -709,8 +672,6 @@
   std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
-  std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
-      GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -720,8 +681,8 @@
 
   // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
   // only.
-  mutable ReaderWriterMutex deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
-  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
+  mutable std::unique_ptr<ReaderWriterMutex> deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(GetDeoptimizedMethodsLock());
   bool deoptimization_enabled_;
 
   // Current interpreter handler table. This is updated each time the thread state flags are
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 9146245..d973689 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -50,7 +50,6 @@
       received_exception_thrown_event(false),
       received_exception_handled_event(false),
       received_branch_event(false),
-      received_invoke_virtual_or_interface_event(false),
       received_watched_frame_pop(false) {}
 
   virtual ~TestInstrumentationListener() {}
@@ -146,15 +145,6 @@
     received_branch_event = true;
   }
 
-  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
-                                Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                ArtMethod* callee ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    received_invoke_virtual_or_interface_event = true;
-  }
-
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_watched_frame_pop  = true;
@@ -172,7 +162,6 @@
     received_exception_thrown_event = false;
     received_exception_handled_event = false;
     received_branch_event = false;
-    received_invoke_virtual_or_interface_event = false;
     received_watched_frame_pop = false;
   }
 
@@ -187,7 +176,6 @@
   bool received_exception_thrown_event;
   bool received_exception_handled_event;
   bool received_branch_event;
-  bool received_invoke_virtual_or_interface_event;
   bool received_watched_frame_pop;
 
  private:
@@ -382,8 +370,6 @@
         return instr->HasExceptionHandledListeners();
       case instrumentation::Instrumentation::kBranch:
         return instr->HasBranchListeners();
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        return instr->HasInvokeVirtualOrInterfaceListeners();
       case instrumentation::Instrumentation::kWatchedFramePop:
         return instr->HasWatchedFramePopListeners();
       default:
@@ -434,9 +420,6 @@
       case instrumentation::Instrumentation::kBranch:
         instr->Branch(self, method, dex_pc, -1);
         break;
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        instr->InvokeVirtualOrInterface(self, obj, method, dex_pc, method);
-        break;
       case instrumentation::Instrumentation::kWatchedFramePop:
         instr->WatchedFramePopped(self, frame);
         break;
@@ -477,8 +460,6 @@
         return listener.received_exception_handled_event;
       case instrumentation::Instrumentation::kBranch:
         return listener.received_branch_event;
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        return listener.received_invoke_virtual_or_interface_event;
       case instrumentation::Instrumentation::kWatchedFramePop:
         return listener.received_watched_frame_pop;
       default:
@@ -528,9 +509,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodEntered,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ true);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, MethodExitObjectEvent) {
@@ -548,9 +529,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ true);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, MethodExitPrimEvent) {
@@ -567,9 +548,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ false);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ false);
 }
 
 TEST_F(InstrumentationTest, MethodUnwindEvent) {
@@ -601,9 +582,9 @@
   ASSERT_TRUE(field != nullptr);
 
   TestEvent(instrumentation::Instrumentation::kFieldWritten,
-            /*event_method*/ nullptr,
-            /*event_field*/ field,
-            /*with_object*/ true);
+            /*event_method=*/ nullptr,
+            /*event_field=*/ field,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, FieldWritePrimEvent) {
@@ -619,9 +600,9 @@
   ASSERT_TRUE(field != nullptr);
 
   TestEvent(instrumentation::Instrumentation::kFieldWritten,
-            /*event_method*/ nullptr,
-            /*event_field*/ field,
-            /*with_object*/ false);
+            /*event_method=*/ nullptr,
+            /*event_field=*/ field,
+            /*with_object=*/ false);
 }
 
 TEST_F(InstrumentationTest, ExceptionHandledEvent) {
@@ -636,10 +617,6 @@
   TestEvent(instrumentation::Instrumentation::kBranch);
 }
 
-TEST_F(InstrumentationTest, InvokeVirtualOrInterfaceEvent) {
-  TestEvent(instrumentation::Instrumentation::kInvokeVirtualOrInterface);
-}
-
 TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
   ScopedObjectAccess soa(Thread::Current());
   jobject class_loader = LoadDex("Instrumentation");
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
new file mode 100644
index 0000000..6fc53e9
--- /dev/null
+++ b/runtime/intern_table-inl.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERN_TABLE_INL_H_
+#define ART_RUNTIME_INTERN_TABLE_INL_H_
+
+#include "intern_table.h"
+
+// Required for ToModifiedUtf8 below.
+#include "mirror/string-inl.h"
+
+namespace art {
+
+template <typename Visitor>
+inline void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space,
+                                                const Visitor& visitor) {
+  DCHECK(image_space != nullptr);
+  // Only add if we have the interned strings section.
+  const ImageHeader& header = image_space->GetImageHeader();
+  const ImageSection& section = header.GetInternedStringsSection();
+  if (section.Size() > 0) {
+    AddTableFromMemory(image_space->Begin() + section.Offset(), visitor, !header.IsAppImage());
+  }
+}
+
+template <typename Visitor>
+inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr,
+                                              const Visitor& visitor,
+                                              bool is_boot_image) {
+  size_t read_count = 0;
+  UnorderedSet set(ptr, /*make copy*/false, &read_count);
+  {
+    // Hold the lock while calling the visitor to prevent possible race
+    // conditions with another thread adding intern strings.
+    MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+    // Visit the unordered set, may remove elements.
+    visitor(set);
+    if (!set.empty()) {
+      strong_interns_.AddInternStrings(std::move(set), is_boot_image);
+    }
+  }
+  return read_count;
+}
+
+inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings,
+                                                 bool is_boot_image) {
+  static constexpr bool kCheckDuplicates = kIsDebugBuild;
+  if (kCheckDuplicates) {
+    // Avoid doing read barriers since the space might not yet be added to the heap.
+    // See b/117803941
+    for (GcRoot<mirror::String>& string : intern_strings) {
+      CHECK(Find(string.Read<kWithoutReadBarrier>()) == nullptr)
+          << "Already found " << string.Read<kWithoutReadBarrier>()->ToModifiedUtf8()
+          << " in the intern table";
+    }
+  }
+  // Insert at the front since we add new interns into the back.
+  tables_.insert(tables_.begin(),
+                 InternalTable(std::move(intern_strings), is_boot_image));
+}
+
+template <typename Visitor>
+inline void InternTable::VisitInterns(const Visitor& visitor,
+                                      bool visit_boot_images,
+                                      bool visit_non_boot_images) {
+  auto visit_tables = [&](std::vector<Table::InternalTable>& tables)
+      NO_THREAD_SAFETY_ANALYSIS {
+    for (Table::InternalTable& table : tables) {
+      // Determine if we want to visit the table based on the flags.
+      const bool visit =
+          (visit_boot_images && table.IsBootImage()) ||
+          (visit_non_boot_images && !table.IsBootImage());
+      if (visit) {
+        for (auto& intern : table.set_) {
+          visitor(intern);
+        }
+      }
+    }
+  };
+  visit_tables(strong_interns_.tables_);
+  visit_tables(weak_interns_.tables_);
+}
+
+inline size_t InternTable::CountInterns(bool visit_boot_images,
+                                        bool visit_non_boot_images) const {
+  size_t ret = 0u;
+  auto visit_tables = [&](const std::vector<Table::InternalTable>& tables)
+      NO_THREAD_SAFETY_ANALYSIS {
+    for (const Table::InternalTable& table : tables) {
+      // Determine if we want to visit the table based on the flags.
+      const bool visit =
+          (visit_boot_images && table.IsBootImage()) ||
+          (visit_non_boot_images && !table.IsBootImage());
+      if (visit) {
+        ret += table.set_.size();
+      }
+    }
+  };
+  visit_tables(strong_interns_.tables_);
+  visit_tables(weak_interns_.tables_);
+  return ret;
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_INTERN_TABLE_INL_H_
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index c8aaa21..9ac9927 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -23,6 +23,7 @@
 #include "gc/space/image_space.h"
 #include "gc/weak_root_state.h"
 #include "gc_root-inl.h"
+#include "handle_scope-inl.h"
 #include "image-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
@@ -177,18 +178,6 @@
   RemoveWeak(s);
 }
 
-void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
-  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  for (gc::space::ImageSpace* image_space : image_spaces) {
-    const ImageHeader* const header = &image_space->GetImageHeader();
-    // Check if we have the interned strings section.
-    const ImageSection& section = header->GetInternedStringsSection();
-    if (section.Size() > 0) {
-      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
-    }
-  }
-}
-
 void InternTable::BroadcastForNewInterns() {
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::intern_table_lock_);
@@ -303,15 +292,6 @@
   weak_interns_.SweepWeaks(visitor);
 }
 
-size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
-  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return AddTableFromMemoryLocked(ptr);
-}
-
-size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
-  return strong_interns_.AddTableFromMemory(ptr);
-}
-
 size_t InternTable::WriteToMemory(uint8_t* ptr) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   return strong_interns_.WriteToMemory(ptr);
@@ -363,25 +343,6 @@
   }
 }
 
-size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
-  size_t read_count = 0;
-  UnorderedSet set(ptr, /*make copy*/false, &read_count);
-  if (set.empty()) {
-    // Avoid inserting empty sets.
-    return read_count;
-  }
-  // TODO: Disable this for app images if app images have intern tables.
-  static constexpr bool kCheckDuplicates = kIsDebugBuild;
-  if (kCheckDuplicates) {
-    for (GcRoot<mirror::String>& string : set) {
-      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
-    }
-  }
-  // Insert at the front since we add new interns into the back.
-  tables_.insert(tables_.begin(), std::move(set));
-  return read_count;
-}
-
 size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
   if (tables_.empty()) {
     return 0;
@@ -390,22 +351,22 @@
   UnorderedSet combined;
   if (tables_.size() > 1) {
     table_to_write = &combined;
-    for (UnorderedSet& table : tables_) {
-      for (GcRoot<mirror::String>& string : table) {
+    for (InternalTable& table : tables_) {
+      for (GcRoot<mirror::String>& string : table.set_) {
         combined.insert(string);
       }
     }
   } else {
-    table_to_write = &tables_.back();
+    table_to_write = &tables_.back().set_;
   }
   return table_to_write->WriteToMemory(ptr);
 }
 
 void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
-  for (UnorderedSet& table : tables_) {
-    auto it = table.find(GcRoot<mirror::String>(s));
-    if (it != table.end()) {
-      table.erase(it);
+  for (InternalTable& table : tables_) {
+    auto it = table.set_.find(GcRoot<mirror::String>(s));
+    if (it != table.set_.end()) {
+      table.set_.erase(it);
       return;
     }
   }
@@ -414,9 +375,9 @@
 
 ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  for (UnorderedSet& table : tables_) {
-    auto it = table.find(GcRoot<mirror::String>(s));
-    if (it != table.end()) {
+  for (InternalTable& table : tables_) {
+    auto it = table.set_.find(GcRoot<mirror::String>(s));
+    if (it != table.set_.end()) {
       return it->Read();
     }
   }
@@ -425,9 +386,9 @@
 
 ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  for (UnorderedSet& table : tables_) {
-    auto it = table.find(string);
-    if (it != table.end()) {
+  for (InternalTable& table : tables_) {
+    auto it = table.set_.find(string);
+    if (it != table.set_.end()) {
       return it->Read();
     }
   }
@@ -435,29 +396,29 @@
 }
 
 void InternTable::Table::AddNewTable() {
-  tables_.push_back(UnorderedSet());
+  tables_.push_back(InternalTable());
 }
 
 void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
   // Always insert the last table, the image tables are before and we avoid inserting into these
   // to prevent dirty pages.
   DCHECK(!tables_.empty());
-  tables_.back().insert(GcRoot<mirror::String>(s));
+  tables_.back().set_.insert(GcRoot<mirror::String>(s));
 }
 
 void InternTable::Table::VisitRoots(RootVisitor* visitor) {
   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
       visitor, RootInfo(kRootInternedString));
-  for (UnorderedSet& table : tables_) {
-    for (auto& intern : table) {
+  for (InternalTable& table : tables_) {
+    for (auto& intern : table.set_) {
       buffered_visitor.VisitRoot(intern);
     }
   }
 }
 
 void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
-  for (UnorderedSet& table : tables_) {
-    SweepWeaks(&table, visitor);
+  for (InternalTable& table : tables_) {
+    SweepWeaks(&table.set_, visitor);
   }
 }
 
@@ -479,8 +440,8 @@
   return std::accumulate(tables_.begin(),
                          tables_.end(),
                          0U,
-                         [](size_t sum, const UnorderedSet& set) {
-                           return sum + set.size();
+                         [](size_t sum, const InternalTable& table) {
+                           return sum + table.Size();
                          });
 }
 
@@ -499,10 +460,10 @@
 
 InternTable::Table::Table() {
   Runtime* const runtime = Runtime::Current();
-  // Initial table.
-  tables_.push_back(UnorderedSet());
-  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
-                               runtime->GetHashTableMaxLoadFactor());
+  InternalTable initial_table;
+  initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+                                   runtime->GetHashTableMaxLoadFactor());
+  tables_.push_back(std::move(initial_table));
 }
 
 }  // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 5ba3e18..165d56c 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -59,6 +59,54 @@
  */
 class InternTable {
  public:
+  // Modified UTF-8-encoded string treated as UTF16.
+  class Utf8String {
+   public:
+    Utf8String(uint32_t utf16_length, const char* utf8_data, int32_t hash)
+        : hash_(hash), utf16_length_(utf16_length), utf8_data_(utf8_data) { }
+
+    int32_t GetHash() const { return hash_; }
+    uint32_t GetUtf16Length() const { return utf16_length_; }
+    const char* GetUtf8Data() const { return utf8_data_; }
+
+   private:
+    int32_t hash_;
+    uint32_t utf16_length_;
+    const char* utf8_data_;
+  };
+
+  class StringHashEquals {
+   public:
+    std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
+    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
+        NO_THREAD_SAFETY_ANALYSIS;
+
+    // Utf8String can be used for lookup.
+    std::size_t operator()(const Utf8String& key) const {
+      // A cast to prevent undesired sign extension.
+      return static_cast<uint32_t>(key.GetHash());
+    }
+
+    bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
+        NO_THREAD_SAFETY_ANALYSIS;
+  };
+
+  class GcRootEmptyFn {
+   public:
+    void MakeEmpty(GcRoot<mirror::String>& item) const {
+      item = GcRoot<mirror::String>();
+    }
+    bool IsEmpty(const GcRoot<mirror::String>& item) const {
+      return item.IsNull();
+    }
+  };
+
+  using UnorderedSet = HashSet<GcRoot<mirror::String>,
+                               GcRootEmptyFn,
+                               StringHashEquals,
+                               StringHashEquals,
+                               TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>>;
+
   InternTable();
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
@@ -97,11 +145,15 @@
   ObjPtr<mirror::String> LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
       REQUIRES(!Locks::intern_table_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
   // Lookup a weak intern, returns null if not found.
   ObjPtr<mirror::String> LookupWeak(Thread* self, ObjPtr<mirror::String> s)
       REQUIRES(!Locks::intern_table_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
   // Total number of interned strings.
   size_t Size() const REQUIRES(!Locks::intern_table_lock_);
@@ -115,14 +167,27 @@
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
+  // Visit all of the interns in the table.
+  template <typename Visitor>
+  void VisitInterns(const Visitor& visitor,
+                    bool visit_boot_images,
+                    bool visit_non_boot_images)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+
+  // Count the number of intern strings in the table.
+  size_t CountInterns(bool visit_boot_images, bool visit_non_boot_images) const
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+
   void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
 
   void BroadcastForNewInterns();
 
-  // Adds all of the resolved image strings from the image spaces into the intern table. The
-  // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
-  // images_added_to_intern_table_ to true.
-  void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
+  // Add all of the strings in the image's intern table into this intern table. This is required so
+  // the intern table is correct.
+  // The visitor arg type is UnorderedSet
+  template <typename Visitor>
+  void AddImageStringsToTable(gc::space::ImageSpace* image_space,
+                              const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
   // Add a new intern table for inserting to, previous intern tables are still there but no
@@ -130,11 +195,6 @@
   void AddNewTable()
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
-  // Read the intern table from memory. The elements aren't copied, the intern hash set data will
-  // point to somewhere within ptr. Only reads the strong interns.
-  size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
   // expected that there is no weak interns since this is called from the image writer.
   size_t WriteToMemory(uint8_t* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -145,51 +205,37 @@
       REQUIRES(!Locks::intern_table_lock_);
 
  private:
-  // Modified UTF-8-encoded string treated as UTF16.
-  class Utf8String {
-   public:
-    Utf8String(uint32_t utf16_length, const char* utf8_data, int32_t hash)
-        : hash_(hash), utf16_length_(utf16_length), utf8_data_(utf8_data) { }
-
-    int32_t GetHash() const { return hash_; }
-    uint32_t GetUtf16Length() const { return utf16_length_; }
-    const char* GetUtf8Data() const { return utf8_data_; }
-
-   private:
-    int32_t hash_;
-    uint32_t utf16_length_;
-    const char* utf8_data_;
-  };
-
-  class StringHashEquals {
-   public:
-    std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
-    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
-        NO_THREAD_SAFETY_ANALYSIS;
-
-    // Utf8String can be used for lookup.
-    std::size_t operator()(const Utf8String& key) const {
-      // A cast to prevent undesired sign extension.
-      return static_cast<uint32_t>(key.GetHash());
-    }
-
-    bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
-        NO_THREAD_SAFETY_ANALYSIS;
-  };
-  class GcRootEmptyFn {
-   public:
-    void MakeEmpty(GcRoot<mirror::String>& item) const {
-      item = GcRoot<mirror::String>();
-    }
-    bool IsEmpty(const GcRoot<mirror::String>& item) const {
-      return item.IsNull();
-    }
-  };
-
   // Table which holds pre zygote and post zygote interned strings. There is one instance for
   // weak interns and strong interns.
   class Table {
    public:
+    class InternalTable {
+     public:
+      InternalTable() = default;
+      InternalTable(UnorderedSet&& set, bool is_boot_image)
+          : set_(std::move(set)), is_boot_image_(is_boot_image) {}
+
+      bool Empty() const {
+        return set_.empty();
+      }
+
+      size_t Size() const {
+        return set_.size();
+      }
+
+      bool IsBootImage() const {
+        return is_boot_image_;
+      }
+
+     private:
+      UnorderedSet set_;
+      bool is_boot_image_ = false;
+
+      friend class InternTable;
+      friend class Table;
+      ART_FRIEND_TEST(InternTableTest, CrossHash);
+    };
+
     Table();
     ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
@@ -209,24 +255,28 @@
     // Read and add an intern table from ptr.
     // Tables read are inserted at the front of the table array. Only checks for conflicts in
     // debug builds. Returns how many bytes were read.
-    size_t AddTableFromMemory(const uint8_t* ptr)
-        REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+    // NO_THREAD_SAFETY_ANALYSIS for the visitor that may require locks.
+    template <typename Visitor>
+    size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
+        REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
     // Write the intern tables to ptr, if there are multiple tables they are combined into a single
     // one. Returns how many bytes were written.
     size_t WriteToMemory(uint8_t* ptr)
         REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
    private:
-    typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
-        TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
-
     void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
+    // Add a table to the front of the tables vector.
+    void AddInternStrings(UnorderedSet&& intern_strings, bool is_boot_image)
+        REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+
     // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
     // modifying the zygote intern table. The back of table is modified when strings are interned.
-    std::vector<UnorderedSet> tables_;
+    std::vector<InternalTable> tables_;
 
+    friend class InternTable;
     friend class linker::ImageWriter;
     ART_FRIEND_TEST(InternTableTest, CrossHash);
   };
@@ -237,10 +287,11 @@
   ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s, bool is_strong, bool holding_locks)
       REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+  // Add a table from memory to the strong interns.
+  template <typename Visitor>
+  size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
+      REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+
   ObjPtr<mirror::String> InsertStrong(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
   ObjPtr<mirror::String> InsertWeak(ObjPtr<mirror::String> s)
@@ -260,9 +311,6 @@
   void RemoveWeakFromTransaction(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
-  size_t AddTableFromMemoryLocked(const uint8_t* ptr)
-      REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Change the weak root state. May broadcast to waiters.
   void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
       REQUIRES(Locks::intern_table_lock_);
@@ -287,6 +335,7 @@
   // Weak root state, used for concurrent system weak processing and more.
   gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
 
+  friend class gc::space::ImageSpace;
   friend class linker::ImageWriter;
   friend class Transaction;
   ART_FRIEND_TEST(InternTableTest, CrossHash);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 8b4fe44..b64ca7d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -78,9 +78,9 @@
   GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
 
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  for (InternTable::Table::UnorderedSet& table : t.strong_interns_.tables_) {
+  for (InternTable::Table::InternalTable& table : t.strong_interns_.tables_) {
     // The negative hash value shall be 32-bit wide on every host.
-    ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
+    ASSERT_TRUE(IsUint<32>(table.set_.hashfn_(str)));
   }
 }
 
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 048c6e4..2e41a9d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -56,7 +56,7 @@
   ScopedObjectAccessUnchecked soa(self);
   if (method->IsStatic()) {
     if (shorty == "L") {
-      typedef jobject (fntype)(JNIEnv*, jclass);
+      using fntype = jobject(JNIEnv*, jclass);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -67,35 +67,35 @@
       }
       result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "V") {
-      typedef void (fntype)(JNIEnv*, jclass);
+      using fntype = void(JNIEnv*, jclass);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get());
     } else if (shorty == "Z") {
-      typedef jboolean (fntype)(JNIEnv*, jclass);
+      using fntype = jboolean(JNIEnv*, jclass);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get()));
     } else if (shorty == "BI") {
-      typedef jbyte (fntype)(JNIEnv*, jclass, jint);
+      using fntype = jbyte(JNIEnv*, jclass, jint);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetB(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "II") {
-      typedef jint (fntype)(JNIEnv*, jclass, jint);
+      using fntype = jint(JNIEnv*, jclass, jint);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "LL") {
-      typedef jobject (fntype)(JNIEnv*, jclass, jobject);
+      using fntype = jobject(JNIEnv*, jclass, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -108,14 +108,14 @@
       }
       result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "IIZ") {
-      typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
+      using fntype = jint(JNIEnv*, jclass, jint, jboolean);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "ILI") {
-      typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
+      using fntype = jint(JNIEnv*, jclass, jobject, jint);
       fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
           method->GetEntryPointFromJni()));
       ScopedLocalRef<jclass> klass(soa.Env(),
@@ -125,7 +125,7 @@
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
     } else if (shorty == "SIZ") {
-      typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
+      using fntype = jshort(JNIEnv*, jclass, jint, jboolean);
       fntype* const fn =
           reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
       ScopedLocalRef<jclass> klass(soa.Env(),
@@ -133,14 +133,14 @@
       ScopedThreadStateChange tsc(self, kNative);
       result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "VIZ") {
-      typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
+      using fntype = void(JNIEnv*, jclass, jint, jboolean);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], args[1]);
     } else if (shorty == "ZLL") {
-      typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
+      using fntype = jboolean(JNIEnv*, jclass, jobject, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -151,7 +151,7 @@
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
     } else if (shorty == "ZILL") {
-      typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
+      using fntype = jboolean(JNIEnv*, jclass, jint, jobject, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -162,7 +162,7 @@
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
     } else if (shorty == "VILII") {
-      typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
+      using fntype = void(JNIEnv*, jclass, jint, jobject, jint, jint);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -171,7 +171,7 @@
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
     } else if (shorty == "VLILII") {
-      typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
+      using fntype = void(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -187,7 +187,7 @@
     }
   } else {
     if (shorty == "L") {
-      typedef jobject (fntype)(JNIEnv*, jobject);
+      using fntype = jobject(JNIEnv*, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
@@ -198,14 +198,14 @@
       }
       result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "V") {
-      typedef void (fntype)(JNIEnv*, jobject);
+      using fntype = void(JNIEnv*, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), rcvr.get());
     } else if (shorty == "LL") {
-      typedef jobject (fntype)(JNIEnv*, jobject, jobject);
+      using fntype = jobject(JNIEnv*, jobject, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
@@ -219,7 +219,7 @@
       result->SetL(soa.Decode<mirror::Object>(jresult));
       ScopedThreadStateChange tsc(self, kNative);
     } else if (shorty == "III") {
-      typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
+      using fntype = jint(JNIEnv*, jobject, jint, jint);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
@@ -237,7 +237,11 @@
   kMterpImplKind          // Assembly interpreter
 };
 
+#if ART_USE_CXX_INTERPRETER
+static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
+#else
 static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
+#endif
 
 static inline JValue Execute(
     Thread* self,
@@ -248,6 +252,14 @@
     bool from_deoptimize = false) REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(!shadow_frame.GetMethod()->IsAbstract());
   DCHECK(!shadow_frame.GetMethod()->IsNative());
+
+  // Check that we are using the right interpreter.
+  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
+    // The flag might be currently being updated on all threads. Retry with lock.
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    DCHECK_EQ(self->UseMterp(), CanUseMterp());
+  }
+
   if (LIKELY(!from_deoptimize)) {  // Entering the method, but not via deoptimization.
     if (kIsDebugBuild) {
       CHECK_EQ(shadow_frame.GetDexPC(), 0u);
@@ -261,6 +273,12 @@
                                         shadow_frame.GetThisObject(accessor.InsSize()),
                                         method,
                                         0);
+      if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+        // The caller will retry this invoke. Just return immediately without any value.
+        DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+        DCHECK(PrevFrameWillRetry(self, shadow_frame));
+        return JValue();
+      }
       if (UNLIKELY(self->IsExceptionPending())) {
         instrumentation->MethodUnwindEvent(self,
                                            shadow_frame.GetThisObject(accessor.InsSize()),
@@ -315,7 +333,7 @@
       } else {
         while (true) {
           // Mterp does not support all instrumentation/debugging.
-          if (MterpShouldSwitchInterpreters() != 0) {
+          if (!self->UseMterp()) {
             return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                                    false);
           }
@@ -348,6 +366,13 @@
     }
   } else {
     // Enter the "with access check" interpreter.
+
+    // The boot classpath should really not have to run access checks.
+    DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr
+           || Runtime::Current()->IsVerificationSoftFail()
+           || Runtime::Current()->IsAotCompiler())
+        << method->PrettyMethod();
+
     if (kInterpreterImplKind == kMterpImplKind) {
       // No access check variants for Mterp.  Just use the switch version.
       if (transaction_active) {
@@ -494,8 +519,8 @@
   JValue value;
   // Set value to last known result in case the shadow frame chain is empty.
   value.SetJ(ret_val->GetJ());
-  // Are we executing the first shadow frame?
-  bool first = true;
+  // How many frames we have executed.
+  size_t frame_cnt = 0;
   while (shadow_frame != nullptr) {
     // We do not want to recover lock state for lock counting when deoptimizing. Currently,
     // the compiler should not have compiled a method that failed structured-locking checks.
@@ -510,24 +535,30 @@
       // the instrumentation. To prevent from reporting it a second time, we simply pass a
       // null Instrumentation*.
       const instrumentation::Instrumentation* const instrumentation =
-          first ? nullptr : Runtime::Current()->GetInstrumentation();
+          frame_cnt == 0 ? nullptr : Runtime::Current()->GetInstrumentation();
       new_dex_pc = MoveToExceptionHandler(
           self, *shadow_frame, instrumentation) ? shadow_frame->GetDexPC() : dex::kDexNoIndex;
     } else if (!from_code) {
       // Deoptimization is not called from code directly.
       const Instruction* instr = &accessor.InstructionAt(dex_pc);
-      if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc) {
-        DCHECK(first);
+      if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc ||
+          shadow_frame->GetForceRetryInstruction()) {
+        DCHECK(frame_cnt == 0 || (frame_cnt == 1 && shadow_frame->GetForceRetryInstruction()))
+            << "frame_cnt: " << frame_cnt
+            << " force-retry: " << shadow_frame->GetForceRetryInstruction();
         // Need to re-execute the dex instruction.
         // (1) An invocation might be split into class initialization and invoke.
         //     In this case, the invoke should not be skipped.
         // (2) A suspend check should also execute the dex instruction at the
         //     corresponding dex pc.
+        // If the ForceRetryInstruction bit is set this must be the second frame (the first being
+        // the one that is being popped).
         DCHECK_EQ(new_dex_pc, dex_pc);
+        shadow_frame->SetForceRetryInstruction(false);
       } else if (instr->Opcode() == Instruction::MONITOR_ENTER ||
                  instr->Opcode() == Instruction::MONITOR_EXIT) {
         DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
-        DCHECK(first);
+        DCHECK_EQ(frame_cnt, 0u);
         // Non-idempotent dex instruction should not be re-executed.
         // On the other hand, if a MONITOR_ENTER is at the dex_pc of a suspend
         // check, that MONITOR_ENTER should be executed. That case is handled
@@ -553,7 +584,7 @@
         DCHECK_EQ(new_dex_pc, dex_pc);
       } else {
         DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
-        DCHECK(first);
+        DCHECK_EQ(frame_cnt, 0u);
         // By default, we re-execute the dex instruction since if they are not
         // an invoke, so that we don't have to decode the dex instruction to move
         // result into the right vreg. All slow paths have been audited to be
@@ -566,7 +597,7 @@
     } else {
       // Nothing to do, the dex_pc is the one at which the code requested
       // the deoptimization.
-      DCHECK(first);
+      DCHECK_EQ(frame_cnt, 0u);
       DCHECK_EQ(new_dex_pc, dex_pc);
     }
     if (new_dex_pc != dex::kDexNoIndex) {
@@ -575,8 +606,8 @@
                       accessor,
                       *shadow_frame,
                       value,
-                      /* stay_in_interpreter */ true,
-                      /* from_deoptimize */ true);
+                      /* stay_in_interpreter= */ true,
+                      /* from_deoptimize= */ true);
     }
     ShadowFrame* old_frame = shadow_frame;
     shadow_frame = shadow_frame->GetLink();
@@ -585,7 +616,7 @@
     // and should advance dex pc past the invoke instruction.
     from_code = false;
     deopt_method_type = DeoptimizationMethodType::kDefault;
-    first = false;
+    frame_cnt++;
   }
   ret_val->SetJ(value.GetJ());
 }
@@ -657,5 +688,18 @@
   InitMterpTls(self);
 }
 
+bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame) {
+  ShadowFrame* prev_frame = frame.GetLink();
+  if (prev_frame == nullptr) {
+    NthCallerVisitor vis(self, 1, false);
+    vis.WalkStack();
+    prev_frame = vis.GetCurrentShadowFrame();
+    if (prev_frame == nullptr) {
+      prev_frame = self->FindDebuggerShadowFrame(vis.GetFrameId());
+    }
+  }
+  return prev_frame != nullptr && prev_frame->GetForceRetryInstruction();
+}
+
 }  // namespace interpreter
 }  // namespace art
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 0d43b90..e92d195 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file.h"
 #include "obj_ptr.h"
 
@@ -69,6 +69,12 @@
 
 void InitInterpreterTls(Thread* self);
 
+// Returns true if the previous frame has the ForceRetryInstruction bit set. This is required for
+// ForcePopFrame to work correctly since that will cause the Java function to return null/0, which
+// might not be expected by the code being run.
+bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
 }  // namespace interpreter
 
 }  // namespace art
diff --git a/runtime/interpreter/interpreter_cache.cc b/runtime/interpreter/interpreter_cache.cc
new file mode 100644
index 0000000..e43fe31
--- /dev/null
+++ b/runtime/interpreter/interpreter_cache.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_cache.h"
+#include "thread-inl.h"
+
+namespace art {
+
+void InterpreterCache::Clear(Thread* owning_thread) {
+  DCHECK(owning_thread->GetInterpreterCache() == this);
+  DCHECK(owning_thread == Thread::Current() || owning_thread->IsSuspended());
+  data_.fill(Entry{});
+}
+
+bool InterpreterCache::IsCalledFromOwningThread() {
+  return Thread::Current()->GetInterpreterCache() == this;
+}
+
+}  // namespace art
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
new file mode 100644
index 0000000..003ea6c
--- /dev/null
+++ b/runtime/interpreter/interpreter_cache.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
+
+#include <array>
+#include <atomic>
+
+#include "base/bit_utils.h"
+#include "base/macros.h"
+
+namespace art {
+
+class Thread;
+
+// Small fast thread-local cache for the interpreter.
+// It can hold arbitrary pointer-sized key-value pair.
+// The interpretation of the value depends on the key.
+// Presence of entry might imply some pre-conditions.
+// All operations must be done from the owning thread,
+// or at a point when the owning thread is suspended.
+//
+// The key-value pairs stored in the cache currently are:
+//   iget/iput: The field offset. The field must be non-volatile.
+//   sget/sput: The ArtField* pointer. The field must be non-volatile.
+//   invoke: The ArtMethod* pointer (before vtable indirection, etc).
+//
+// We ensure consistency of the cache by clearing it
+// whenever any dex file is unloaded.
+//
+// Aligned to 16-bytes to make it easier to get the address of the cache
+// from assembly (it ensures that the offset is valid immediate value).
+class ALIGNED(16) InterpreterCache {
+  // Aligned since we load the whole entry in single assembly instruction.
+  typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
+
+ public:
+  // 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
+  // Value of 256 has around 75% cache hit rate.
+  static constexpr size_t kSize = 256;
+
+  InterpreterCache() {
+    // We can not use the Clear() method since the constructor will not
+    // be called from the owning thread.
+    data_.fill(Entry{});
+  }
+
+  // Clear the whole cache. It requires the owning thread for DCHECKs.
+  void Clear(Thread* owning_thread);
+
+  ALWAYS_INLINE bool Get(const void* key, /* out */ size_t* value) {
+    DCHECK(IsCalledFromOwningThread());
+    Entry& entry = data_[IndexOf(key)];
+    if (LIKELY(entry.first == key)) {
+      *value = entry.second;
+      return true;
+    }
+    return false;
+  }
+
+  ALWAYS_INLINE void Set(const void* key, size_t value) {
+    DCHECK(IsCalledFromOwningThread());
+    data_[IndexOf(key)] = Entry{key, value};
+  }
+
+ private:
+  bool IsCalledFromOwningThread();
+
+  static ALWAYS_INLINE size_t IndexOf(const void* key) {
+    static_assert(IsPowerOfTwo(kSize), "Size must be power of two");
+    size_t index = (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
+    DCHECK_LT(index, kSize);
+    return index;
+  }
+
+  std::array<Entry, kSize> data_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 92d4731..7a40ab4 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
 
 #include <cmath>
 
+#include "base/casts.h"
 #include "base/enums.h"
 #include "class_root.h"
 #include "debugger.h"
@@ -28,10 +29,13 @@
 #include "jvalue-inl.h"
 #include "method_handles-inl.h"
 #include "method_handles.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class.h"
 #include "mirror/emulated_stack_frame.h"
 #include "mirror/method_handle_impl-inl.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
 #include "mirror/var_handle.h"
 #include "reflection-inl.h"
 #include "reflection.h"
@@ -49,6 +53,42 @@
   ThrowNullPointerExceptionFromDexPC();
 }
 
+bool CheckStackOverflow(Thread* self, size_t frame_size)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+  uint8_t* stack_end = self->GetStackEndForInterpreter(implicit_check);
+  if (UNLIKELY(__builtin_frame_address(0) < stack_end + frame_size)) {
+    ThrowStackOverflowError(self);
+    return false;
+  }
+  return true;
+}
+
+bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method) {
+  Runtime* runtime = Runtime::Current();
+  const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
+  if (!runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
+    return false;
+  }
+  if (!method->SkipAccessChecks() || method->IsNative() || method->IsProxyMethod()) {
+    return false;
+  }
+  if (method->IsIntrinsic()) {
+    return false;
+  }
+  if (method->GetDeclaringClass()->IsStringClass() && method->IsConstructor()) {
+    return false;
+  }
+  if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
+    return false;
+  }
+  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
+  if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
+    return false;
+  }
+  return true;
+}
+
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
          bool transaction_active>
 bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
@@ -371,6 +411,12 @@
     if (UNLIKELY(self->IsExceptionPending())) {
       return false;
     }
+    if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+      // Don't actually set the field. The next instruction will force us to pop.
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+      DCHECK(PrevFrameWillRetry(self, shadow_frame));
+      return true;
+    }
   }
   // Note: iput-x-quick instructions are only for non-volatile fields.
   switch (field_type) {
@@ -440,6 +486,11 @@
       self->IsExceptionThrownByCurrentMethod(exception.Get())) {
     // See b/65049545 for why we don't need to check to see if the exception has changed.
     instrumentation->ExceptionThrownEvent(self, exception.Get());
+    if (shadow_frame.GetForcePopFrame()) {
+      // We will check in the caller for GetForcePopFrame again. We need to bail out early to
+      // prevent an ExceptionHandledEvent from also being sent before popping.
+      return true;
+    }
   }
   bool clear_exception = false;
   uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(
@@ -584,10 +635,10 @@
   for (uint32_t i = 0, e = shadow_frame->NumberOfVRegs(); i < e; ++i) {
     if (shadow_frame->GetVRegReference(i) == existing) {
       DCHECK_EQ(shadow_frame->GetVRegReference(i),
-                reinterpret_cast<mirror::Object*>(shadow_frame->GetVReg(i)));
+                reinterpret_cast32<mirror::Object*>(shadow_frame->GetVReg(i)));
       shadow_frame->SetVRegReference(i, result.GetL());
       DCHECK_EQ(shadow_frame->GetVRegReference(i),
-                reinterpret_cast<mirror::Object*>(shadow_frame->GetVReg(i)));
+                reinterpret_cast32<mirror::Object*>(shadow_frame->GetVReg(i)));
     }
   }
 }
@@ -702,12 +753,12 @@
   if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
     static const bool kIsRange = false;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
   } else {
     DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
     static const bool kIsRange = true;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
   }
 }
 
@@ -719,12 +770,12 @@
   if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
     static const bool kIsRange = false;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
   } else {
     DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
     static const bool kIsRange = true;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
   }
 }
 
@@ -1059,7 +1110,7 @@
   return true;
 
 #define COLLECT_REFERENCE_ARRAY(T, Type)                                \
-  Handle<mirror::ObjectArray<T>> array =                                \
+  Handle<mirror::ObjectArray<T>> array =                   /* NOLINT */ \
       hs.NewHandle(mirror::ObjectArray<T>::Alloc(self,                  \
                                                  array_type,            \
                                                  array_length));        \
@@ -1117,7 +1168,7 @@
                                                                   const DexFile* dex_file,
                                                                   uint32_t call_site_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
+  const dex::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
   CallSiteArrayValueIterator it(*dex_file, csi);
   DCHECK_GE(it.Size(), 1u);
 
@@ -1172,7 +1223,7 @@
   static constexpr size_t kMandatoryArgumentsCount = 3;
   ArtMethod* referrer = shadow_frame.GetMethod();
   const DexFile* dex_file = referrer->GetDexFile();
-  const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
+  const dex::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
   CallSiteArrayValueIterator it(*dex_file, csi);
   if (it.Size() < kMandatoryArgumentsCount) {
     ThrowBootstrapMethodError("Truncated bootstrap arguments (%zu < %zu)",
@@ -1445,7 +1496,7 @@
   // If both register locations contains the same value, the register probably holds a reference.
   // Note: As an optimization, non-moving collectors leave a stale reference value
   // in the references array even after the original vreg was overwritten to a non-reference.
-  if (src_value == reinterpret_cast<uintptr_t>(o.Ptr())) {
+  if (src_value == reinterpret_cast32<uint32_t>(o.Ptr())) {
     new_shadow_frame->SetVRegReference(dest_reg, o);
   } else {
     new_shadow_frame->SetVReg(dest_reg, src_value);
@@ -1586,7 +1637,7 @@
 
     // We need to do runtime check on reference assignment. We need to load the shorty
     // to get the exact type of each reference argument.
-    const DexFile::TypeList* params = method->GetParameterTypeList();
+    const dex::TypeList* params = method->GetParameterTypeList();
     uint32_t shorty_len = 0;
     const char* shorty = method->GetShorty(&shorty_len);
 
@@ -1825,7 +1876,7 @@
     default:
       LOG(FATAL) << "Unsupported primitive type " << primitive_component_type
                  << " in fill-array-data";
-      break;
+      UNREACHABLE();
   }
 }
 
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index b324b4c..6366035 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -32,8 +32,8 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "common_dex_operations.h"
@@ -42,7 +42,9 @@
 #include "dex/dex_instruction-inl.h"
 #include "entrypoints/entrypoint_utils-inl.h"
 #include "handle_scope-inl.h"
-#include "jit/jit.h"
+#include "interpreter_mterp_impl.h"
+#include "interpreter_switch_impl.h"
+#include "jit/jit-inl.h"
 #include "mirror/call_site.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
@@ -51,6 +53,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
+#include "mterp/mterp.h"
 #include "obj_ptr.h"
 #include "stack.h"
 #include "thread.h"
@@ -121,92 +124,185 @@
 bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
             const Instruction* inst, uint16_t inst_data, JValue* result);
 
-// Handles streamlined non-range invoke static, direct and virtual instructions originating in
-// mterp. Access checks and instrumentation other than jit profiling are not supported, but does
-// support interpreter intrinsics if applicable.
-// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type>
-static inline bool DoFastInvoke(Thread* self,
-                                ShadowFrame& shadow_frame,
-                                const Instruction* inst,
-                                uint16_t inst_data,
-                                JValue* result) {
-  const uint32_t method_idx = inst->VRegB_35c();
-  const uint32_t vregC = inst->VRegC_35c();
-  ObjPtr<mirror::Object> receiver = (type == kStatic)
-      ? nullptr
-      : shadow_frame.GetVRegReference(vregC);
-  ArtMethod* sf_method = shadow_frame.GetMethod();
-  ArtMethod* const called_method = FindMethodFromCode<type, false>(
-      method_idx, &receiver, sf_method, self);
-  // The shadow frame should already be pushed, so we don't need to update it.
-  if (UNLIKELY(called_method == nullptr)) {
-    CHECK(self->IsExceptionPending());
-    result->SetJ(0);
-    return false;
-  } else if (UNLIKELY(!called_method->IsInvokable())) {
-    called_method->ThrowInvocationTimeError();
-    result->SetJ(0);
-    return false;
-  } else {
-    jit::Jit* jit = Runtime::Current()->GetJit();
-    if (jit != nullptr && type == kVirtual) {
-      jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
-    }
-    if (called_method->IsIntrinsic()) {
-      if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
-                               shadow_frame.GetResultRegister())) {
-        return !self->IsExceptionPending();
-      }
-    }
-    return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
-  }
-}
+bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+// Throws exception if we are getting close to the end of the stack.
+NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
 // Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
 // Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check>
-static inline bool DoInvoke(Thread* self,
-                            ShadowFrame& shadow_frame,
-                            const Instruction* inst,
-                            uint16_t inst_data,
-                            JValue* result) {
+template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp, bool is_quick = false>
+static ALWAYS_INLINE bool DoInvoke(Thread* self,
+                                   ShadowFrame& shadow_frame,
+                                   const Instruction* inst,
+                                   uint16_t inst_data,
+                                   JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   // Make sure to check for async exceptions before anything else.
-  if (UNLIKELY(self->ObserveAsyncException())) {
+  if (is_mterp && self->UseMterp()) {
+    DCHECK(!self->ObserveAsyncException());
+  } else if (UNLIKELY(self->ObserveAsyncException())) {
     return false;
   }
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+  ArtMethod* sf_method = shadow_frame.GetMethod();
+
+  // Try to find the method in small thread-local cache first.
+  InterpreterCache* tls_cache = self->GetInterpreterCache();
+  size_t tls_value;
+  ArtMethod* resolved_method;
+  if (is_quick) {
+    resolved_method = nullptr;  // We don't know/care what the original method was.
+  } else if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+    resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
+  } else {
+    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+    constexpr ClassLinker::ResolveMode resolve_mode =
+        do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+                        : ClassLinker::ResolveMode::kNoChecks;
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
+    if (UNLIKELY(resolved_method == nullptr)) {
+      CHECK(self->IsExceptionPending());
+      result->SetJ(0);
+      return false;
+    }
+    tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+  }
+
+  // Null pointer check and virtual method resolution.
   ObjPtr<mirror::Object> receiver =
       (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
-  ArtMethod* sf_method = shadow_frame.GetMethod();
-  ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
-      method_idx, &receiver, sf_method, self);
-  // The shadow frame should already be pushed, so we don't need to update it.
+  ArtMethod* called_method;
+  if (is_quick) {
+    if (UNLIKELY(receiver == nullptr)) {
+      // We lost the reference to the method index so we cannot get a more precise exception.
+      ThrowNullPointerExceptionFromDexPC();
+      return false;
+    }
+    DCHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
+    called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
+        /*vtable_idx=*/ method_idx, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+  } else {
+    called_method = FindMethodToCall<type, do_access_check>(
+        method_idx, resolved_method, &receiver, sf_method, self);
+  }
   if (UNLIKELY(called_method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
     return false;
-  } else if (UNLIKELY(!called_method->IsInvokable())) {
+  }
+  if (UNLIKELY(!called_method->IsInvokable())) {
     called_method->ThrowInvocationTimeError();
     result->SetJ(0);
     return false;
-  } else {
-    jit::Jit* jit = Runtime::Current()->GetJit();
-    if (jit != nullptr && (type == kVirtual || type == kInterface)) {
-      jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+  }
+
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr && (type == kVirtual || type == kInterface)) {
+    jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+  }
+
+  if (is_mterp && !is_range && called_method->IsIntrinsic()) {
+    if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+                             shadow_frame.GetResultRegister())) {
+      if (jit != nullptr && sf_method != nullptr) {
+        jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
+      }
+      return !self->IsExceptionPending();
     }
-    // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
-    if (type == kVirtual || type == kInterface) {
-      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-      if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
-        instrumentation->InvokeVirtualOrInterface(
-            self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
+  }
+
+  // Check whether we can use the fast path. The result is cached in the ArtMethod.
+  // If the bit is not set, we explicitly recheck all the conditions.
+  // If any of the conditions get falsified, it is important to clear the bit.
+  bool use_fast_path = false;
+  if (is_mterp && self->UseMterp()) {
+    use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
+    if (!use_fast_path) {
+      use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
+      if (use_fast_path) {
+        called_method->SetFastInterpreterToInterpreterInvokeFlag();
       }
     }
-    return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
-                                             result);
   }
+
+  if (use_fast_path) {
+    DCHECK(Runtime::Current()->IsStarted());
+    DCHECK(!Runtime::Current()->IsActiveTransaction());
+    DCHECK(called_method->SkipAccessChecks());
+    DCHECK(!called_method->IsNative());
+    DCHECK(!called_method->IsProxyMethod());
+    DCHECK(!called_method->IsIntrinsic());
+    DCHECK(!(called_method->GetDeclaringClass()->IsStringClass() &&
+        called_method->IsConstructor()));
+    DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsInitialized());
+
+    const uint16_t number_of_inputs =
+        (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+    CodeItemDataAccessor accessor(called_method->DexInstructionData());
+    uint32_t num_regs = accessor.RegistersSize();
+    DCHECK_EQ(number_of_inputs, accessor.InsSize());
+    DCHECK_GE(num_regs, number_of_inputs);
+    size_t first_dest_reg = num_regs - number_of_inputs;
+
+    if (UNLIKELY(!CheckStackOverflow(self, ShadowFrame::ComputeSize(num_regs)))) {
+      return false;
+    }
+
+    if (jit != nullptr) {
+      jit->AddSamples(self, called_method, 1, /* with_backedges */false);
+    }
+
+    // Create shadow frame on the stack.
+    const char* old_cause = self->StartAssertNoThreadSuspension("DoFastInvoke");
+    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+        CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+    ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+    if (is_range) {
+      size_t src = vregC;
+      for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst, ++src) {
+        *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(src);
+        *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(src);
+      }
+    } else {
+      uint32_t arg[Instruction::kMaxVarArgRegs];
+      inst->GetVarArgs(arg, inst_data);
+      for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst) {
+        *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(arg[i]);
+        *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(arg[i]);
+      }
+    }
+    self->PushShadowFrame(new_shadow_frame);
+    self->EndAssertNoThreadSuspension(old_cause);
+
+    DCheckStaticState(self, called_method);
+    while (true) {
+      // Mterp does not support all instrumentation/debugging.
+      if (!self->UseMterp()) {
+        *result =
+            ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, false);
+        break;
+      }
+      if (ExecuteMterpImpl(self, accessor.Insns(), new_shadow_frame, result)) {
+        break;
+      } else {
+        // Mterp didn't like that instruction.  Single-step it with the reference interpreter.
+        *result = ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, true);
+        if (new_shadow_frame->GetDexPC() == dex::kDexNoIndex) {
+          break;  // Single-stepped a return or an exception not handled locally.
+        }
+      }
+    }
+    self->PopShadowFrame();
+
+    return !self->IsExceptionPending();
+  }
+
+  return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
+                                           result);
 }
 
 static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
@@ -272,50 +368,6 @@
   }
 }
 
-// Handles invoke-virtual-quick and invoke-virtual-quick-range instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-template<bool is_range>
-static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
-                                        const Instruction* inst, uint16_t inst_data,
-                                        JValue* result) {
-  const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  ObjPtr<mirror::Object> const receiver = shadow_frame.GetVRegReference(vregC);
-  if (UNLIKELY(receiver == nullptr)) {
-    // We lost the reference to the method index so we cannot get a more
-    // precised exception message.
-    ThrowNullPointerExceptionFromDexPC();
-    return false;
-  }
-  const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-  CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
-  ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
-      vtable_idx, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
-  if (UNLIKELY(called_method == nullptr)) {
-    CHECK(self->IsExceptionPending());
-    result->SetJ(0);
-    return false;
-  } else if (UNLIKELY(!called_method->IsInvokable())) {
-    called_method->ThrowInvocationTimeError();
-    result->SetJ(0);
-    return false;
-  } else {
-    jit::Jit* jit = Runtime::Current()->GetJit();
-    if (jit != nullptr) {
-      jit->InvokeVirtualOrInterface(
-          receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
-      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
-    }
-    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-    // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
-    if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
-      instrumentation->InvokeVirtualOrInterface(
-          self, receiver.Ptr(), shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
-    }
-    // No need to check since we've been quickened.
-    return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
-  }
-}
-
 // Handles iget-XXX and sget-XXX instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
@@ -569,7 +621,7 @@
 
 static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+  const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
   const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
   const char* method_name = dex_file->GetMethodName(method_id);
   // Instead of calling ResolveMethod() which has suspend point and can trigger
@@ -601,52 +653,6 @@
                                     uint16_t this_obj_vreg,
                                     JValue result);
 
-// Explicitly instantiate all DoInvoke functions.
-#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check)                      \
-  template REQUIRES_SHARED(Locks::mutator_lock_)                                           \
-  bool DoInvoke<_type, _is_range, _do_check>(Thread* self,                                 \
-                                             ShadowFrame& shadow_frame,                    \
-                                             const Instruction* inst, uint16_t inst_data,  \
-                                             JValue* result)
-
-#define EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(_type)       \
-  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false);  \
-  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, true);   \
-  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, false);   \
-  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, true);
-
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kStatic)      // invoke-static/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kDirect)      // invoke-direct/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kVirtual)     // invoke-virtual/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kSuper)       // invoke-super/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface)   // invoke-interface/range.
-#undef EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL
-#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
-
-// Explicitly instantiate all DoFastInvoke functions.
-#define EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(_type)                     \
-  template REQUIRES_SHARED(Locks::mutator_lock_)                         \
-  bool DoFastInvoke<_type>(Thread* self,                                 \
-                           ShadowFrame& shadow_frame,                    \
-                           const Instruction* inst, uint16_t inst_data,  \
-                           JValue* result)
-
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kStatic);     // invoke-static
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kDirect);     // invoke-direct
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kVirtual);    // invoke-virtual
-#undef EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL
-
-// Explicitly instantiate all DoInvokeVirtualQuick functions.
-#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range)                    \
-  template REQUIRES_SHARED(Locks::mutator_lock_)                                     \
-  bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame,      \
-                                       const Instruction* inst, uint16_t inst_data,  \
-                                       JValue* result)
-
-EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false);  // invoke-virtual-quick.
-EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true);   // invoke-virtual-quick-range.
-#undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
-
 }  // namespace interpreter
 }  // namespace art
 
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 17b3cd4..2127f1d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -558,6 +558,9 @@
     UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
     UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
     UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
+    UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
+    UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
+    UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
     INTRINSIC_CASE(VarHandleFullFence)
     INTRINSIC_CASE(VarHandleAcquireFence)
     INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index d8a764f..177b0fd 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "jvalue.h"
 #include "obj_ptr.h"
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
new file mode 100644
index 0000000..aec2aa2
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -0,0 +1,2655 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
+
+#include "interpreter_switch_impl.h"
+
+#include "base/enums.h"
+#include "base/memory_tool.h"
+#include "base/quasi_atomic.h"
+#include "dex/dex_file_types.h"
+#include "dex/dex_instruction_list.h"
+#include "experimental_flags.h"
+#include "interpreter_common.h"
+#include "jit/jit-inl.h"
+#include "jvalue-inl.h"
+#include "mirror/string-alloc-inl.h"
+#include "nth_caller_visitor.h"
+#include "safe_math.h"
+#include "shadow_frame-inl.h"
+#include "thread.h"
+
+namespace art {
+namespace interpreter {
+
+// Short-lived helper class which executes a single DEX bytecode instruction.  It is inlined by the compiler.
+//
+// The function names must match the names from dex_instruction_list.h and have no arguments.
+//
+// Any relevant execution information is stored in the fields - it should be kept to minimum.
+//
+// Helper methods may return a boolean value - in which case 'false' always means
+// "stop executing current opcode" (which does not necessarily exit the interpreter loop).
+//
+template<bool do_access_check, bool transaction_active>
+class InstructionHandler {
+ public:
+  ALWAYS_INLINE WARN_UNUSED bool CheckForceReturn()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+      DCHECK(PrevFrameWillRetry(self, shadow_frame))
+          << "Pop frame forced without previous frame ready to retry instruction!";
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+      if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
+        SendMethodExitEvents(self,
+                             instrumentation,
+                             shadow_frame,
+                             shadow_frame.GetThisObject(Accessor().InsSize()),
+                             shadow_frame.GetMethod(),
+                             inst->GetDexPc(Insns()),
+                             JValue());
+      }
+      ctx->result = JValue(); /* Handled in caller. */
+      exit_interpreter_loop = true;
+      return false;
+    }
+    return true;
+  }
+
+  NO_INLINE WARN_UNUSED bool HandlePendingExceptionWithInstrumentationImpl(
+      const instrumentation::Instrumentation* instr)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(self->IsExceptionPending());
+    self->AllowThreadSuspension();
+    if (!CheckForceReturn()) {
+      return false;
+    }
+    if (!MoveToExceptionHandler(self, shadow_frame, instr)) {
+      /* Structured locking is to be enforced for abnormal termination, too. */
+      DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
+      if (ctx->interpret_one_instruction) {
+        /* Signal mterp to return to caller */
+        shadow_frame.SetDexPC(dex::kDexNoIndex);
+      }
+      ctx->result = JValue(); /* Handled in caller. */
+      exit_interpreter_loop = true;
+      return false;  // Return to caller.
+    }
+    if (!CheckForceReturn()) {
+      return false;
+    }
+    int32_t displacement =
+        static_cast<int32_t>(shadow_frame.GetDexPC()) - static_cast<int32_t>(dex_pc);
+    inst = inst->RelativeAt(displacement);
+    return false;  // Stop executing this opcode and continue in the exception handler.
+  }
+
+  // Forwards the call to the NO_INLINE HandlePendingExceptionWithInstrumentationImpl.
+  ALWAYS_INLINE WARN_UNUSED bool HandlePendingExceptionWithInstrumentation(
+      const instrumentation::Instrumentation* instr)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // We need to help the compiler a bit to make the NO_INLINE call efficient.
+    //  * All handler fields should be in registers, so we do not want to take the object
+    //    address (for 'this' argument). Make a copy of the handler just for the slow path.
+    //  * The modifiable fields should also be in registers, so we don't want to store their
+    //    address even in the handler copy. Make a copy of them just for the call as well.
+    const Instruction* inst_copy = inst;
+    bool exit_loop_copy = exit_interpreter_loop;
+    InstructionHandler<do_access_check, transaction_active> handler_copy(
+        ctx, instrumentation, self, shadow_frame, dex_pc, inst_copy, inst_data, exit_loop_copy);
+    bool result = handler_copy.HandlePendingExceptionWithInstrumentationImpl(instr);
+    inst = inst_copy;
+    exit_interpreter_loop = exit_loop_copy;
+    return result;
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool HandlePendingException()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return HandlePendingExceptionWithInstrumentation(instrumentation);
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool PossiblyHandlePendingExceptionOnInvokeImpl(
+      bool is_exception_pending,
+      const Instruction* next_inst)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (UNLIKELY(shadow_frame.GetForceRetryInstruction())) {
+      /* Don't need to do anything except clear the flag and exception. We leave the */
+      /* instruction the same so it will be re-executed on the next go-around.       */
+      DCHECK(inst->IsInvoke());
+      shadow_frame.SetForceRetryInstruction(false);
+      if (UNLIKELY(is_exception_pending)) {
+        DCHECK(self->IsExceptionPending());
+        if (kIsDebugBuild) {
+          LOG(WARNING) << "Suppressing exception for instruction-retry: "
+                       << self->GetException()->Dump();
+        }
+        self->ClearException();
+      }
+    } else if (UNLIKELY(is_exception_pending)) {
+      /* Should have succeeded. */
+      DCHECK(!shadow_frame.GetForceRetryInstruction());
+      if (!HandlePendingException()) {
+        return false;
+      }
+    } else {
+      inst = next_inst;
+    }
+    return true;
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool PossiblyHandlePendingException(
+      bool is_exception_pending,
+      const Instruction* next_inst)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    /* Should only be on invoke instructions. */
+    DCHECK(!shadow_frame.GetForceRetryInstruction());
+    if (UNLIKELY(is_exception_pending)) {
+      if (!HandlePendingException()) {
+        return false;
+      }
+    } else {
+      inst = next_inst;
+    }
+    return true;
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool HandleMonitorChecks()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) {
+      if (!HandlePendingException()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // Code to run before each dex instruction.
+  ALWAYS_INLINE WARN_UNUSED bool Preamble()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    /* We need to put this before & after the instrumentation to avoid having to put in a */
+    /* post-script macro.                                                                 */
+    if (!CheckForceReturn()) {
+      return false;
+    }
+    if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+      uint8_t opcode = inst->Opcode(inst_data);
+      bool is_move_result_object = (opcode == Instruction::MOVE_RESULT_OBJECT);
+      JValue* save_ref = is_move_result_object ? &ctx->result_register : nullptr;
+      if (UNLIKELY(!DoDexPcMoveEvent(self,
+                                     Accessor(),
+                                     shadow_frame,
+                                     dex_pc,
+                                     instrumentation,
+                                     save_ref))) {
+        if (!HandlePendingException()) {
+          return false;
+        }
+      }
+      if (!CheckForceReturn()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool BranchInstrumentation(int32_t offset)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (UNLIKELY(instrumentation->HasBranchListeners())) {
+      instrumentation->Branch(self, shadow_frame.GetMethod(), dex_pc, offset);
+    }
+    JValue result;
+    if (jit::Jit::MaybeDoOnStackReplacement(self,
+                                            shadow_frame.GetMethod(),
+                                            dex_pc,
+                                            offset,
+                                            &result)) {
+      if (ctx->interpret_one_instruction) {
+        /* OSR has completed execution of the method.  Signal mterp to return to caller */
+        shadow_frame.SetDexPC(dex::kDexNoIndex);
+      }
+      ctx->result = result;
+      exit_interpreter_loop = true;
+      return false;
+    }
+    return true;
+  }
+
+  ALWAYS_INLINE void HotnessUpdate()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    jit::Jit* jit = Runtime::Current()->GetJit();
+    if (jit != nullptr) {
+      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/ true);
+    }
+  }
+
+  ALWAYS_INLINE WARN_UNUSED bool HandleAsyncException()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (UNLIKELY(self->ObserveAsyncException())) {
+      if (!HandlePendingException()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  ALWAYS_INLINE void HandleBackwardBranch(int32_t offset)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (IsBackwardBranch(offset)) {
+      HotnessUpdate();
+      /* Record new dex pc early to have consistent suspend point at loop header. */
+      shadow_frame.SetDexPC(inst->GetDexPc(Insns()));
+      self->AllowThreadSuspension();
+    }
+  }
+
+  // Unlike most other events the DexPcMovedEvent can be sent when there is a pending exception (if
+  // the next instruction is MOVE_EXCEPTION). This means it needs to be handled carefully to be able
+  // to detect exceptions thrown by the DexPcMovedEvent itself. These exceptions could be thrown by
+  // jvmti-agents while handling breakpoint or single step events. We had to move this into its own
+  // function because it was making ExecuteSwitchImpl have too large a stack.
+  NO_INLINE static bool DoDexPcMoveEvent(Thread* self,
+                                         const CodeItemDataAccessor& accessor,
+                                         const ShadowFrame& shadow_frame,
+                                         uint32_t dex_pc,
+                                         const instrumentation::Instrumentation* instrumentation,
+                                         JValue* save_ref)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(instrumentation->HasDexPcListeners());
+    StackHandleScope<2> hs(self);
+    Handle<mirror::Throwable> thr(hs.NewHandle(self->GetException()));
+    mirror::Object* null_obj = nullptr;
+    HandleWrapper<mirror::Object> h(
+        hs.NewHandleWrapper(LIKELY(save_ref == nullptr) ? &null_obj : save_ref->GetGCRoot()));
+    self->ClearException();
+    instrumentation->DexPcMovedEvent(self,
+                                     shadow_frame.GetThisObject(accessor.InsSize()),
+                                     shadow_frame.GetMethod(),
+                                     dex_pc);
+    if (UNLIKELY(self->IsExceptionPending())) {
+      // We got a new exception in the dex-pc-moved event.
+      // We just let this exception replace the old one.
+      // TODO It would be good to add the old exception to the
+      // suppressed exceptions of the new one if possible.
+      return false;
+    } else {
+      if (UNLIKELY(!thr.IsNull())) {
+        self->SetException(thr.Get());
+      }
+      return true;
+    }
+  }
+
+  static bool NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
+  }
+
+  // Sends the normal method exit event.
+  // Returns true if the events succeeded and false if there is a pending exception.
+  NO_INLINE static bool SendMethodExitEvents(
+      Thread* self,
+      const instrumentation::Instrumentation* instrumentation,
+      const ShadowFrame& frame,
+      ObjPtr<mirror::Object> thiz,
+      ArtMethod* method,
+      uint32_t dex_pc,
+      const JValue& result)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool had_event = false;
+    // We don't send method-exit if it's a pop-frame. We still send frame_popped though.
+    if (UNLIKELY(instrumentation->HasMethodExitListeners() && !frame.GetForcePopFrame())) {
+      had_event = true;
+      instrumentation->MethodExitEvent(self, thiz.Ptr(), method, dex_pc, result);
+    }
+    if (UNLIKELY(frame.NeedsNotifyPop() && instrumentation->HasWatchedFramePopListeners())) {
+      had_event = true;
+      instrumentation->WatchedFramePopped(self, frame);
+    }
+    if (UNLIKELY(had_event)) {
+      return !self->IsExceptionPending();
+    } else {
+      return true;
+    }
+  }
+
+#define BRANCH_INSTRUMENTATION(offset)                                                            \
+  if (!BranchInstrumentation(offset)) {                                                           \
+    return;                                                                                       \
+  }
+
+#define HANDLE_PENDING_EXCEPTION()                                                                \
+  if (!HandlePendingException()) {                                                                \
+    return;                                                                                       \
+  }
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function)                    \
+  if (!PossiblyHandlePendingException(is_exception_pending, inst->next_function())) {             \
+    return;                                                                                       \
+  }
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(is_exception_pending)             \
+  if (!PossiblyHandlePendingExceptionOnInvokeImpl(is_exception_pending, inst->Next_4xx())) {      \
+    return;                                                                                       \
+  }
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(is_exception_pending)                         \
+  if (!PossiblyHandlePendingExceptionOnInvokeImpl(is_exception_pending, inst->Next_3xx())) {      \
+    return;                                                                                       \
+  }
+
+  ALWAYS_INLINE void NOP() REQUIRES_SHARED(Locks::mutator_lock_) {
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MOVE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_32x(),
+                         shadow_frame.GetVReg(inst->VRegB_32x()));
+    inst = inst->Next_3xx();
+  }
+
+  ALWAYS_INLINE void MOVE_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_WIDE_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_22x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MOVE_WIDE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_32x(),
+                             shadow_frame.GetVRegLong(inst->VRegB_32x()));
+    inst = inst->Next_3xx();
+  }
+
+  ALWAYS_INLINE void MOVE_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
+                                  shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_OBJECT_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
+                                  shadow_frame.GetVRegReference(inst->VRegB_22x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MOVE_OBJECT_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegReference(inst->VRegA_32x(),
+                                  shadow_frame.GetVRegReference(inst->VRegB_32x()));
+    inst = inst->Next_3xx();
+  }
+
+  ALWAYS_INLINE void MOVE_RESULT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_11x(inst_data), ResultRegister()->GetI());
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_RESULT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), ResultRegister()->GetJ());
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_RESULT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), ResultRegister()->GetL());
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MOVE_EXCEPTION() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Throwable> exception = self->GetException();
+    DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
+    shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+    self->ClearException();
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void RETURN_VOID_NO_BARRIER() REQUIRES_SHARED(Locks::mutator_lock_) {
+    JValue result;
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return;
+    }
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
+        return;
+      }
+    }
+    if (ctx->interpret_one_instruction) {
+      /* Signal mterp to return to caller */
+      shadow_frame.SetDexPC(dex::kDexNoIndex);
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+  }
+
+  ALWAYS_INLINE void RETURN_VOID() REQUIRES_SHARED(Locks::mutator_lock_) {
+    QuasiAtomic::ThreadFenceForConstructor();
+    JValue result;
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return;
+    }
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
+        return;
+      }
+    }
+    if (ctx->interpret_one_instruction) {
+      /* Signal mterp to return to caller */
+      shadow_frame.SetDexPC(dex::kDexNoIndex);
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+  }
+
+  ALWAYS_INLINE void RETURN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    JValue result;
+    result.SetJ(0);
+    result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return;
+    }
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
+        return;
+      }
+    }
+    if (ctx->interpret_one_instruction) {
+      /* Signal mterp to return to caller */
+      shadow_frame.SetDexPC(dex::kDexNoIndex);
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+  }
+
+  ALWAYS_INLINE void RETURN_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    JValue result;
+    result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return;
+    }
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
+        return;
+      }
+    }
+    if (ctx->interpret_one_instruction) {
+      /* Signal mterp to return to caller */
+      shadow_frame.SetDexPC(dex::kDexNoIndex);
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+  }
+
+  ALWAYS_INLINE void RETURN_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    JValue result;
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return;
+    }
+    const size_t ref_idx = inst->VRegA_11x(inst_data);
+    ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
+    if (do_assignability_check && obj_result != nullptr) {
+      ObjPtr<mirror::Class> return_type = shadow_frame.GetMethod()->ResolveReturnType();
+      // Re-load since it might have moved.
+      obj_result = shadow_frame.GetVRegReference(ref_idx);
+      if (return_type == nullptr) {
+        // Return the pending exception.
+        HANDLE_PENDING_EXCEPTION();
+      }
+      if (!obj_result->VerifierInstanceOf(return_type)) {
+        // This should never happen.
+        std::string temp1, temp2;
+        self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+                                 "Returning '%s' that is not instance of return type '%s'",
+                                 obj_result->GetClass()->GetDescriptor(&temp1),
+                                 return_type->GetDescriptor(&temp2));
+        HANDLE_PENDING_EXCEPTION();
+      }
+    }
+    result.SetL(obj_result);
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
+        return;
+      }
+    }
+    // Re-load since it might have moved during the MethodExitEvent.
+    result.SetL(shadow_frame.GetVRegReference(ref_idx));
+    if (ctx->interpret_one_instruction) {
+      /* Signal mterp to return to caller */
+      shadow_frame.SetDexPC(dex::kDexNoIndex);
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+  }
+
+  ALWAYS_INLINE void CONST_4() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t dst = inst->VRegA_11n(inst_data);
+    int4_t val = inst->VRegB_11n(inst_data);
+    shadow_frame.SetVReg(dst, val);
+    if (val == 0) {
+      shadow_frame.SetVRegReference(dst, nullptr);
+    }
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void CONST_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t dst = inst->VRegA_21s(inst_data);
+    int16_t val = inst->VRegB_21s();
+    shadow_frame.SetVReg(dst, val);
+    if (val == 0) {
+      shadow_frame.SetVRegReference(dst, nullptr);
+    }
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void CONST() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t dst = inst->VRegA_31i(inst_data);
+    int32_t val = inst->VRegB_31i();
+    shadow_frame.SetVReg(dst, val);
+    if (val == 0) {
+      shadow_frame.SetVRegReference(dst, nullptr);
+    }
+    inst = inst->Next_3xx();
+  }
+
+  ALWAYS_INLINE void CONST_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t dst = inst->VRegA_21h(inst_data);
+    int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+    shadow_frame.SetVReg(dst, val);
+    if (val == 0) {
+      shadow_frame.SetVRegReference(dst, nullptr);
+    }
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void CONST_WIDE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void CONST_WIDE_32() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
+    inst = inst->Next_3xx();
+  }
+
+  ALWAYS_INLINE void CONST_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
+    inst = inst->Next_51l();
+  }
+
+  ALWAYS_INLINE void CONST_WIDE_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
+                             static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void CONST_STRING() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::String> s = ResolveString(self,
+                                             shadow_frame,
+                                             dex::StringIndex(inst->VRegB_21c()));
+    if (UNLIKELY(s == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void CONST_STRING_JUMBO() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::String> s = ResolveString(self,
+                                             shadow_frame,
+                                             dex::StringIndex(inst->VRegB_31c()));
+    if (UNLIKELY(s == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+      inst = inst->Next_3xx();
+    }
+  }
+
+  ALWAYS_INLINE void CONST_CLASS() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+                                                     shadow_frame.GetMethod(),
+                                                     self,
+                                                     false,
+                                                     do_access_check);
+    if (UNLIKELY(c == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void CONST_METHOD_HANDLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ClassLinker* cl = Runtime::Current()->GetClassLinker();
+    ObjPtr<mirror::MethodHandle> mh = cl->ResolveMethodHandle(self,
+                                                              inst->VRegB_21c(),
+                                                              shadow_frame.GetMethod());
+    if (UNLIKELY(mh == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mh);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void CONST_METHOD_TYPE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ClassLinker* cl = Runtime::Current()->GetClassLinker();
+    ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
+                                                          dex::ProtoIndex(inst->VRegB_21c()),
+                                                          shadow_frame.GetMethod());
+    if (UNLIKELY(mt == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mt);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void MONITOR_ENTER() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!HandleAsyncException()) {
+      return;
+    }
+    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    if (UNLIKELY(obj == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
+      POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+    }
+  }
+
+  ALWAYS_INLINE void MONITOR_EXIT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!HandleAsyncException()) {
+      return;
+    }
+    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    if (UNLIKELY(obj == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
+      POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+    }
+  }
+
+  ALWAYS_INLINE void CHECK_CAST() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+                                                     shadow_frame.GetMethod(),
+                                                     self,
+                                                     false,
+                                                     do_access_check);
+    if (UNLIKELY(c == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+      if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
+        ThrowClassCastException(c, obj->GetClass());
+        HANDLE_PENDING_EXCEPTION();
+      } else {
+        inst = inst->Next_2xx();
+      }
+    }
+  }
+
+  // Handler for instance-of: sets vreg A to 1 when the reference in vreg B is
+  // a non-null instance of the class at type index C, otherwise 0.
+  ALWAYS_INLINE void INSTANCE_OF() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Resolve the queried class; on failure an exception is pending.
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegC_22c()),
+                                                     shadow_frame.GetMethod(),
+                                                     self,
+                                                     false,
+                                                     do_access_check);
+    if (UNLIKELY(c == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+      // null is not an instance of anything.
+      shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
+                           (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // Handler for array-length: writes the length of the array referenced by
+  // vreg B into vreg A; a null array reference throws NullPointerException.
+  ALWAYS_INLINE void ARRAY_LENGTH() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+    if (UNLIKELY(array == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
+      inst = inst->Next_1xx();
+    }
+  }
+
+  // Handler for new-instance: allocates an object of the class at type index
+  // B and stores the reference in vreg A.
+  ALWAYS_INLINE void NEW_INSTANCE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> obj = nullptr;
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+                                                     shadow_frame.GetMethod(),
+                                                     self,
+                                                     false,
+                                                     do_access_check);
+    if (LIKELY(c != nullptr)) {
+      if (UNLIKELY(c->IsStringClass())) {
+        // String is allocated through its dedicated path (empty string) rather
+        // than the generic object allocator.
+        gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+        obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
+      } else {
+        obj = AllocObjectFromCode<true>(
+            c.Ptr(),
+            self,
+            Runtime::Current()->GetHeap()->GetCurrentAllocator());
+      }
+    }
+    if (UNLIKELY(obj == nullptr)) {
+      // Resolution or allocation failed; an exception is already pending.
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      obj->GetClass()->AssertInitializedOrInitializingInThread(self);
+      // Don't allow finalizable objects to be allocated during a transaction since these can't
+      // be finalized without a started runtime.
+      if (transaction_active && obj->GetClass()->IsFinalizable()) {
+        AbortTransactionF(self, "Allocating finalizable object in transaction: %s",
+                          obj->PrettyTypeOf().c_str());
+        // NOTE(review): the vreg store below is only skipped if this macro
+        // transfers control out of the handler — TODO confirm against the
+        // HANDLE_PENDING_EXCEPTION definition.
+        HANDLE_PENDING_EXCEPTION();
+      }
+      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // Handler for new-array: allocates an array of the type at index C with the
+  // length held in vreg B and stores the reference in vreg A.
+  ALWAYS_INLINE void NEW_ARRAY() REQUIRES_SHARED(Locks::mutator_lock_) {
+    int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
+    // Returns null (with an exception pending) on resolution failure,
+    // negative size or OOM.
+    ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check, true>(
+        dex::TypeIndex(inst->VRegC_22c()),
+        length,
+        shadow_frame.GetMethod(),
+        self,
+        Runtime::Current()->GetHeap()->GetCurrentAllocator());
+    if (UNLIKELY(obj == nullptr)) {
+      HANDLE_PENDING_EXCEPTION();
+    } else {
+      shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // Handler for filled-new-array: DoFilledNewArray allocates the array,
+  // fills it from the argument registers and leaves it in the result
+  // register (first template argument false = non-range operand decoding).
+  ALWAYS_INLINE void FILLED_NEW_ARRAY() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success =
+        DoFilledNewArray<false, do_access_check, transaction_active>(inst, shadow_frame, self,
+                                                                     ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+  }
+
+  // Handler for filled-new-array/range: same as above but with range operand
+  // decoding (first template argument true).
+  ALWAYS_INLINE void FILLED_NEW_ARRAY_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success =
+        DoFilledNewArray<true, do_access_check, transaction_active>(inst, shadow_frame,
+                                                                    self, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+  }
+
+  // Handler for fill-array-data: copies the inline data payload (located at
+  // signed offset B from this instruction) into the array in vreg A.
+  ALWAYS_INLINE void FILL_ARRAY_DATA() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // The payload offset is expressed in 16-bit code units relative to the
+    // current instruction.
+    const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+    const Instruction::ArrayDataPayload* payload =
+        reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
+    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+    // FillArrayData returns false (with an exception pending) on a null array
+    // or a size mismatch.
+    bool success = FillArrayData(obj, payload);
+    if (!success) {
+      // NOTE(review): the code below assumes this macro transfers control out
+      // of the handler; otherwise obj->AsArray() could run on null — TODO
+      // confirm against the HANDLE_PENDING_EXCEPTION definition.
+      HANDLE_PENDING_EXCEPTION();
+    }
+    if (transaction_active) {
+      // Record the overwritten elements so an aborted transaction can be
+      // rolled back.
+      RecordArrayElementsInTransaction(obj->AsArray(), payload->element_count);
+    }
+    inst = inst->Next_3xx();
+  }
+
+  // Handler for the throw opcode: raises the Throwable held in vreg A.
+  ALWAYS_INLINE void THROW() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Deliver any pending asynchronous exception first.
+    if (!HandleAsyncException()) {
+      return;
+    }
+    ObjPtr<mirror::Object> exception =
+        shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    if (UNLIKELY(exception == nullptr)) {
+      // throw with a null operand raises NullPointerException instead.
+      ThrowNullPointerException("throw with null exception");
+    } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
+      // This should never happen.
+      std::string temp;
+      self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+                               "Throwing '%s' that is not instance of Throwable",
+                               exception->GetClass()->GetDescriptor(&temp));
+    } else {
+      self->SetException(exception->AsThrowable());
+    }
+    // Every path above leaves an exception pending on the thread.
+    HANDLE_PENDING_EXCEPTION();
+  }
+
+  // Handler for goto (signed 8-bit code-unit offset).
+  ALWAYS_INLINE void GOTO() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!HandleAsyncException()) {
+      return;
+    }
+    int8_t offset = inst->VRegA_10t(inst_data);
+    BRANCH_INSTRUMENTATION(offset);
+    inst = inst->RelativeAt(offset);
+    // Presumably performs the suspend-check/instrumentation work required on
+    // backward branches — TODO confirm against HandleBackwardBranch().
+    HandleBackwardBranch(offset);
+  }
+
+  // Handler for goto/16 (signed 16-bit code-unit offset).
+  ALWAYS_INLINE void GOTO_16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!HandleAsyncException()) {
+      return;
+    }
+    int16_t offset = inst->VRegA_20t();
+    BRANCH_INSTRUMENTATION(offset);
+    inst = inst->RelativeAt(offset);
+    HandleBackwardBranch(offset);
+  }
+
+  // Handler for goto/32 (signed 32-bit code-unit offset).
+  ALWAYS_INLINE void GOTO_32() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!HandleAsyncException()) {
+      return;
+    }
+    int32_t offset = inst->VRegA_30t();
+    BRANCH_INSTRUMENTATION(offset);
+    inst = inst->RelativeAt(offset);
+    HandleBackwardBranch(offset);
+  }
+
+  // Handler for packed-switch: DoPackedSwitch returns the relative branch
+  // offset of the matching case (or the fall-through size when none match).
+  ALWAYS_INLINE void PACKED_SWITCH() REQUIRES_SHARED(Locks::mutator_lock_) {
+    int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
+    BRANCH_INSTRUMENTATION(offset);
+    inst = inst->RelativeAt(offset);
+    HandleBackwardBranch(offset);
+  }
+
+  // Handler for sparse-switch: same contract as PACKED_SWITCH but with a
+  // key/target table lookup.
+  ALWAYS_INLINE void SPARSE_SWITCH() REQUIRES_SHARED(Locks::mutator_lock_) {
+    int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
+    BRANCH_INSTRUMENTATION(offset);
+    inst = inst->RelativeAt(offset);
+    HandleBackwardBranch(offset);
+  }
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+
+
+  // Handler for cmpl-float: three-way compare of the floats in vregs B and C
+  // into vreg A; an unordered (NaN) comparison yields -1 ("less" bias).
+  ALWAYS_INLINE void CMPL_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const float lhs = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+    const float rhs = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+    const int32_t cmp = (lhs > rhs) ? 1 : ((lhs == rhs) ? 0 : -1);
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), cmp);
+    inst = inst->Next_2xx();
+  }
+
+  // Handler for cmpg-float: three-way compare of the floats in vregs B and C
+  // into vreg A; an unordered (NaN) comparison yields 1 ("greater" bias).
+  ALWAYS_INLINE void CMPG_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const float lhs = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+    const float rhs = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+    const int32_t cmp = (lhs < rhs) ? -1 : ((lhs == rhs) ? 0 : 1);
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), cmp);
+    inst = inst->Next_2xx();
+  }
+
+  // Handler for cmpl-double: three-way compare of the doubles in vregs B and
+  // C into vreg A; an unordered (NaN) comparison yields -1 ("less" bias).
+  ALWAYS_INLINE void CMPL_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const double lhs = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+    const double rhs = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+    const int32_t cmp = (lhs > rhs) ? 1 : ((lhs == rhs) ? 0 : -1);
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), cmp);
+    inst = inst->Next_2xx();
+  }
+
+
+  // Handler for cmpg-double: three-way compare of the doubles in vregs B and
+  // C into vreg A; an unordered (NaN) comparison yields 1 ("greater" bias).
+  ALWAYS_INLINE void CMPG_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const double lhs = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+    const double rhs = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+    const int32_t cmp = (lhs < rhs) ? -1 : ((lhs == rhs) ? 0 : 1);
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), cmp);
+    inst = inst->Next_2xx();
+  }
+
+#pragma clang diagnostic pop
+
+
+  // Handler for cmp-long: three-way compare of the 64-bit values in vregs B
+  // and C, storing -1/0/1 into vreg A.
+  ALWAYS_INLINE void CMP_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const int64_t lhs = shadow_frame.GetVRegLong(inst->VRegB_23x());
+    const int64_t rhs = shadow_frame.GetVRegLong(inst->VRegC_23x());
+    const int32_t cmp = (lhs > rhs) ? 1 : ((lhs == rhs) ? 0 : -1);
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), cmp);
+    inst = inst->Next_2xx();
+  }
+
+  // --- if-test vA, vB handlers --------------------------------------------
+  // Taken branches jump by the signed 16-bit code-unit offset in C (with
+  // backward-branch handling); untaken branches fall through past this
+  // 2-unit instruction.  BRANCH_INSTRUMENTATION reports the offset taken.
+  ALWAYS_INLINE void IF_EQ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_NE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_LT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_GE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // Handler for if-gt vA, vB: branches by the signed 16-bit offset in C when
+  // vreg A > vreg B, otherwise falls through past this 2-unit instruction.
+  // (Continuation-line indentation fixed to match the sibling if-* handlers.)
+  ALWAYS_INLINE void IF_GT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // Handler for if-le vA, vB: branches by the signed 16-bit offset in C when
+  // vreg A <= vreg B, otherwise falls through past this 2-unit instruction.
+  ALWAYS_INLINE void IF_LE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+      int16_t offset = inst->VRegC_22t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // --- if-testz vA handlers ------------------------------------------------
+  // Compare vreg A against zero; taken branches jump by the signed 16-bit
+  // offset in B, untaken branches fall through past this 2-unit instruction.
+  ALWAYS_INLINE void IF_EQZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_NEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_LTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_GEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_GTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  ALWAYS_INLINE void IF_LEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
+      int16_t offset = inst->VRegB_21t();
+      BRANCH_INSTRUMENTATION(offset);
+      inst = inst->RelativeAt(offset);
+      HandleBackwardBranch(offset);
+    } else {
+      BRANCH_INSTRUMENTATION(2);
+      inst = inst->Next_2xx();
+    }
+  }
+
+  // --- aget-* handlers -----------------------------------------------------
+  // Read array[index] from the array in vreg B at the index in vreg C into
+  // vreg A.  CheckIsValidIndex() raises ArrayIndexOutOfBoundsException and
+  // returns false when the index is out of range.
+  // NOTE(review): after the null check these handlers continue without an
+  // else; correctness relies on HANDLE_PENDING_EXCEPTION() transferring
+  // control out of the handler — TODO confirm against the macro definition.
+  ALWAYS_INLINE void AGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void AGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::ByteArray> array = a->AsByteArray();
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void AGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::CharArray> array = a->AsCharArray();
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void AGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::ShortArray> array = a->AsShortArray();
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // aget serves both int and float arrays (same 32-bit storage), hence the
+  // DCHECK and the downcast to IntArray.
+  ALWAYS_INLINE void AGET() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
+    ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // aget-wide serves both long and double arrays (same 64-bit storage).
+  ALWAYS_INLINE void AGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
+    ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void AGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
+    if (array->CheckIsValidIndex(index)) {
+      shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // --- aput-* handlers -----------------------------------------------------
+  // Store the value in vreg A into the array in vreg B at the index in
+  // vreg C.  The transaction_active template flag makes SetWithoutChecks
+  // record the old value for transaction rollback.
+  // NOTE(review): as with the aget handlers, the null-check path relies on
+  // HANDLE_PENDING_EXCEPTION() transferring control out of the handler —
+  // TODO confirm against the macro definition.
+  ALWAYS_INLINE void APUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void APUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::ByteArray> array = a->AsByteArray();
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void APUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::CharArray> array = a->AsCharArray();
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  ALWAYS_INLINE void APUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::ShortArray> array = a->AsShortArray();
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // aput serves both int and float arrays (same 32-bit storage).
+  ALWAYS_INLINE void APUT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
+    ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // aput-wide serves both long and double arrays (same 64-bit storage).
+  ALWAYS_INLINE void APUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
+    ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
+    if (array->CheckIsValidIndex(index)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // aput-object additionally checks that the stored reference is assignable
+  // to the array's component type (ArrayStoreException otherwise).
+  ALWAYS_INLINE void APUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      HANDLE_PENDING_EXCEPTION();
+    }
+    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+    ObjPtr<mirror::Object> val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+    ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
+    if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
+      array->SetWithoutChecks<transaction_active>(index, val);
+      inst = inst->Next_2xx();
+    } else {
+      HANDLE_PENDING_EXCEPTION();
+    }
+  }
+
+  // --- iget-* handlers -----------------------------------------------------
+  // Each delegates to DoFieldGet, which resolves the instance field,
+  // null-checks the receiver and reads the value into vreg A; on failure the
+  // pending exception is handled, otherwise the 2-unit instruction is
+  // skipped.
+  ALWAYS_INLINE void IGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+        self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- iget-*-quick handlers -----------------------------------------------
+  // Quickened variants: the instruction carries a pre-resolved field offset,
+  // so DoIGetQuick needs no field resolution or access check — presumably it
+  // reads directly at that offset (TODO confirm against DoIGetQuick).
+  ALWAYS_INLINE void IGET_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_WIDE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_OBJECT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_BOOLEAN_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_BYTE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_CHAR_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IGET_SHORT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- sget-* handlers -----------------------------------------------------
+  // Static field reads via DoFieldGet; unlike the iget family these also
+  // carry the transaction_active template flag.
+  ALWAYS_INLINE void SGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- iput-* handlers -----------------------------------------------------
+  // Instance field writes via DoFieldPut; transaction_active makes the write
+  // recordable for transaction rollback.
+  ALWAYS_INLINE void IPUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- iput-*-quick handlers -----------------------------------------------
+  // Quickened instance field writes: the instruction carries a pre-resolved
+  // field offset, so DoIPutQuick skips resolution and access checks.
+  ALWAYS_INLINE void IPUT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_BOOLEAN_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_BYTE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_CHAR_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_SHORT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_WIDE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void IPUT_OBJECT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+        shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- Static field put (sput-*) handlers ---
+  // Same shape as the iput handlers, but with StaticPrimitiveWrite /
+  // StaticObjectWrite so DoFieldPut targets a static field.
+  ALWAYS_INLINE void SPUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SPUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  // --- Method invocation handlers ---
+  // Template arguments to DoInvoke: invoke kind, is_range, do_access_check,
+  // is_mterp (always false here — this is the switch interpreter), and for
+  // the *_QUICK variants is_quick=true. The callee's result is written into
+  // ResultRegister(); failures go through the invoke-specific exception macro.
+  ALWAYS_INLINE void INVOKE_VIRTUAL() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_VIRTUAL_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_SUPER() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kSuper, false, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_SUPER_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kSuper, true, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_DIRECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kDirect, false, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_DIRECT_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kDirect, true, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_INTERFACE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kInterface, false, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_INTERFACE_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kInterface, true, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_STATIC() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kStatic, false, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_STATIC_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kStatic, true, do_access_check, /*is_mterp=*/ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_VIRTUAL_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false,
+        /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_VIRTUAL_RANGE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false,
+        /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  // --- invoke-polymorphic / invoke-custom handlers ---
+  // Both families require the method-handles runtime feature (DCHECKed).
+  // invoke-polymorphic uses its own exception macro; invoke-custom shares
+  // the regular invoke one.
+  ALWAYS_INLINE void INVOKE_POLYMORPHIC() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+    bool success = DoInvokePolymorphic</* is_range= */ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_POLYMORPHIC_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+    bool success = DoInvokePolymorphic</* is_range= */ true>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_CUSTOM() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+    bool success = DoInvokeCustom</* is_range= */ false>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  ALWAYS_INLINE void INVOKE_CUSTOM_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+    bool success = DoInvokeCustom</* is_range= */ true>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  }
+
+  // --- Unary arithmetic (neg-*/not-*) handlers ---
+  // 12x format: vA = op(vB); then advance one code unit (Next_1xx).
+  ALWAYS_INLINE void NEG_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void NOT_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(
+        inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void NEG_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void NOT_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(
+        inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void NEG_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void NEG_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  // --- Primitive conversion (X-to-Y) handlers ---
+  // 12x format: vA = convert(vB). Widening/narrowing between the integral
+  // types relies on implicit conversions or explicit static_casts; the
+  // float/double -> integral cases go through art_float_to_integral, which
+  // presumably implements the Dex-mandated NaN/out-of-range clamping —
+  // TODO(review): confirm against its definition.
+  ALWAYS_INLINE void INT_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void INT_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+                              shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void INT_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+                               shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void LONG_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+                         shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void LONG_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+                              shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void LONG_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+                               shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void FLOAT_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+    int32_t result = art_float_to_integral<int32_t, float>(val);
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void FLOAT_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+    int64_t result = art_float_to_integral<int64_t, float>(val);
+    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void FLOAT_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+                               shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DOUBLE_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+    int32_t result = art_float_to_integral<int32_t, double>(val);
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DOUBLE_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+    int64_t result = art_float_to_integral<int64_t, double>(val);
+    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DOUBLE_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+                              shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  // int -> byte/char/short truncate via static_cast; note char is unsigned
+  // 16-bit (uint16_t) while byte/short are signed.
+  ALWAYS_INLINE void INT_TO_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
+        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void INT_TO_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
+        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void INT_TO_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
+        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  // --- 32-bit integer binary op (op-int) handlers ---
+  // 23x format: vA = vB op vC. Add/sub/mul go through SafeAdd/SafeSub/SafeMul
+  // (presumably to sidestep signed-overflow UB — TODO(review): confirm).
+  // div/rem delegate to helpers that can throw (divide-by-zero); shifts mask
+  // the shift count to its low 5 bits.
+  ALWAYS_INLINE void ADD_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
+                                 shadow_frame.GetVReg(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SUB_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
+                                 shadow_frame.GetVReg(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
+                                 shadow_frame.GetVReg(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+                               shadow_frame.GetVReg(inst->VRegB_23x()),
+                               shadow_frame.GetVReg(inst->VRegC_23x()));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void REM_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+                                  shadow_frame.GetVReg(inst->VRegB_23x()),
+                                  shadow_frame.GetVReg(inst->VRegC_23x()));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void SHL_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_23x()) <<
+                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SHR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_23x()) >>
+                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  // Unsigned (logical) shift right: cast to uint32_t before shifting.
+  ALWAYS_INLINE void USHR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
+                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void AND_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_23x()) &
+                         shadow_frame.GetVReg(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void OR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_23x()) |
+                         shadow_frame.GetVReg(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void XOR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_23x()) ^
+                         shadow_frame.GetVReg(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  // --- 64-bit integer binary op (op-long) handlers ---
+  // Same pattern as the int ops, on the wide (long) vreg accessors. Note
+  // div/rem here return void and signal failure only via a pending exception
+  // on `self`, hence the self->IsExceptionPending() check. Shift counts are
+  // masked to 6 bits for 64-bit operands.
+  ALWAYS_INLINE void ADD_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SUB_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+                 shadow_frame.GetVRegLong(inst->VRegB_23x()),
+                 shadow_frame.GetVRegLong(inst->VRegC_23x()));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+  }
+
+  ALWAYS_INLINE void REM_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+                    shadow_frame.GetVRegLong(inst->VRegB_23x()),
+                    shadow_frame.GetVRegLong(inst->VRegC_23x()));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+  }
+
+  ALWAYS_INLINE void AND_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_23x()) &
+                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void OR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_23x()) |
+                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void XOR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
+                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SHL_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
+                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SHR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
+                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+    inst = inst->Next_2xx();
+  }
+
+  // Unsigned (logical) shift right: cast to uint64_t before shifting.
+  ALWAYS_INLINE void USHR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+                             static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
+                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+    inst = inst->Next_2xx();
+  }
+
+  // --- Floating-point binary op (op-float / op-double) handlers ---
+  // 23x format: vA = vB op vC using native IEEE arithmetic; rem uses
+  // fmodf/fmod. These cannot throw, so they advance unconditionally.
+  ALWAYS_INLINE void ADD_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
+                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SUB_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
+                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
+                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
+                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void REM_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+                              fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
+                                    shadow_frame.GetVRegFloat(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void ADD_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
+                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SUB_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
+                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
+                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
+                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void REM_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+                               fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
+                                    shadow_frame.GetVRegDouble(inst->VRegC_23x())));
+    inst = inst->Next_2xx();
+  }
+
+  // --- 32-bit integer two-address (op-int/2addr) handlers ---
+  // 12x format: vA = vA op vB — vA is both first operand and destination,
+  // so it is read into a local before the write. Advance one unit (Next_1xx).
+  ALWAYS_INLINE void ADD_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
+                                        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SUB_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         SafeSub(shadow_frame.GetVReg(vregA),
+                                 shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MUL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         SafeMul(shadow_frame.GetVReg(vregA),
+                                 shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DIV_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+                               shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+  }
+
+  ALWAYS_INLINE void REM_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+                                  shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+  }
+
+  ALWAYS_INLINE void SHL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         shadow_frame.GetVReg(vregA) <<
+                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         shadow_frame.GetVReg(vregA) >>
+                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void USHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
+                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void AND_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         shadow_frame.GetVReg(vregA) &
+                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void OR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         shadow_frame.GetVReg(vregA) |
+                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void XOR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVReg(vregA,
+                         shadow_frame.GetVReg(vregA) ^
+                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  // --- 64-bit integer two-address (op-long/2addr) handlers ---
+  // 12x format on wide vregs: vA = vA op vB. div/rem signal errors only via
+  // a pending exception on `self`; shift counts are masked to 6 bits.
+  ALWAYS_INLINE void ADD_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             SafeAdd(shadow_frame.GetVRegLong(vregA),
+                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SUB_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             SafeSub(shadow_frame.GetVRegLong(vregA),
+                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MUL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             SafeMul(shadow_frame.GetVRegLong(vregA),
+                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DIV_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+                shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+  }
+
+  ALWAYS_INLINE void REM_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+                    shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+  }
+
+  ALWAYS_INLINE void AND_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             shadow_frame.GetVRegLong(vregA) &
+                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void OR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             shadow_frame.GetVRegLong(vregA) |
+                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void XOR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             shadow_frame.GetVRegLong(vregA) ^
+                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SHL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             shadow_frame.GetVRegLong(vregA) <<
+                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             shadow_frame.GetVRegLong(vregA) >>
+                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+    inst = inst->Next_1xx();
+  }
+
+  // Unsigned (logical) shift right: cast to uint64_t before shifting.
+  ALWAYS_INLINE void USHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegLong(vregA,
+                             static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
+                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+    inst = inst->Next_1xx();
+  }
+
+  // --- Single-precision two-address (op-float/2addr) handlers ---
+  // 12x format: vA = vA op vB on float vregs; rem uses fmodf. These cannot
+  // throw, so each advances unconditionally via Next_1xx.
+  ALWAYS_INLINE void ADD_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegFloat(vregA,
+                              shadow_frame.GetVRegFloat(vregA) +
+                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SUB_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegFloat(vregA,
+                              shadow_frame.GetVRegFloat(vregA) -
+                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MUL_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegFloat(vregA,
+                              shadow_frame.GetVRegFloat(vregA) *
+                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DIV_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegFloat(vregA,
+                              shadow_frame.GetVRegFloat(vregA) /
+                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void REM_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegFloat(vregA,
+                              fmodf(shadow_frame.GetVRegFloat(vregA),
+                                    shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void ADD_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegDouble(vregA,
+                               shadow_frame.GetVRegDouble(vregA) +
+                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void SUB_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegDouble(vregA,
+                               shadow_frame.GetVRegDouble(vregA) -
+                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void MUL_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegDouble(vregA,
+                               shadow_frame.GetVRegDouble(vregA) *
+                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void DIV_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegDouble(vregA,
+                               shadow_frame.GetVRegDouble(vregA) /
+                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void REM_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint4_t vregA = inst->VRegA_12x(inst_data);
+    shadow_frame.SetVRegDouble(vregA,
+                               fmod(shadow_frame.GetVRegDouble(vregA),
+                                    shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
+    inst = inst->Next_1xx();
+  }
+
+  ALWAYS_INLINE void ADD_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                                 inst->VRegC_22s()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void RSUB_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         SafeSub(inst->VRegC_22s(),
+                                 shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                                 inst->VRegC_22s()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+                               shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                               inst->VRegC_22s());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void REM_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+                                  shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                                  inst->VRegC_22s());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void AND_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
+                         inst->VRegC_22s());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void OR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
+                         inst->VRegC_22s());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void XOR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
+                         inst->VRegC_22s());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void ADD_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void RSUB_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(inst->VRegB_22b())));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void MUL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void DIV_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
+                               shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void REM_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
+                                  shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  }
+
+  ALWAYS_INLINE void AND_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22b()) &
+                         inst->VRegC_22b());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void OR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22b()) |
+                         inst->VRegC_22b());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void XOR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22b()) ^
+                         inst->VRegC_22b());
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SHL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22b()) <<
+                         (inst->VRegC_22b() & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void SHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         shadow_frame.GetVReg(inst->VRegB_22b()) >>
+                         (inst->VRegC_22b() & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void USHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+                         static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
+                         (inst->VRegC_22b() & 0x1f));
+    inst = inst->Next_2xx();
+  }
+
+  ALWAYS_INLINE void UNUSED_3E() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_3F() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_40() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_41() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_42() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_43() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_79() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_7A() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F3() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F4() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F5() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F6() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F7() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F8() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE void UNUSED_F9() REQUIRES_SHARED(Locks::mutator_lock_) {
+    UnexpectedOpcode(inst, shadow_frame);
+  }
+
+  ALWAYS_INLINE InstructionHandler(SwitchImplContext* ctx,
+                                   const instrumentation::Instrumentation* instrumentation,
+                                   Thread* self,
+                                   ShadowFrame& shadow_frame,
+                                   uint16_t dex_pc,
+                                   const Instruction*& inst,
+                                   uint16_t inst_data,
+                                   bool& exit_interpreter_loop)
+    : ctx(ctx),
+      instrumentation(instrumentation),
+      self(self),
+      shadow_frame(shadow_frame),
+      dex_pc(dex_pc),
+      inst(inst),
+      inst_data(inst_data),
+      exit_interpreter_loop(exit_interpreter_loop) {
+  }
+
+ private:
+  static constexpr bool do_assignability_check = do_access_check;
+
+  const CodeItemDataAccessor& Accessor() { return ctx->accessor; }
+  const uint16_t* Insns() { return ctx->accessor.Insns(); }
+  JValue* ResultRegister() { return &ctx->result_register; }
+
+  SwitchImplContext* const ctx;
+  const instrumentation::Instrumentation* const instrumentation;
+  Thread* const self;
+  ShadowFrame& shadow_frame;
+  uint32_t const dex_pc;
+  const Instruction*& inst;
+  uint16_t const inst_data;
+  bool& exit_interpreter_loop;
+};
+
+#undef BRANCH_INSTRUMENTATION
+#undef POSSIBLY_HANDLE_PENDING_EXCEPTION
+#undef POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE
+#undef POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC
+#undef HANDLE_PENDING_EXCEPTION
+
+// TODO On ASAN builds this function gets a huge stack frame. Since normally we run in the mterp
+// this shouldn't cause any problems for stack overflow detection. Remove this once b/117341496 is
+// fixed.
+template<bool do_access_check, bool transaction_active>
+ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
+  Thread* self = ctx->self;
+  const CodeItemDataAccessor& accessor = ctx->accessor;
+  ShadowFrame& shadow_frame = ctx->shadow_frame;
+  if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
+    LOG(FATAL) << "Invalid shadow frame for interpreter use";
+    ctx->result = JValue();
+    return;
+  }
+  self->VerifyStack();
+
+  uint32_t dex_pc = shadow_frame.GetDexPC();
+  const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
+  const uint16_t* const insns = accessor.Insns();
+  const Instruction* inst = Instruction::At(insns + dex_pc);
+  uint16_t inst_data;
+
+  DCHECK(!shadow_frame.GetForceRetryInstruction())
+      << "Entered interpreter from invoke without retry instruction being handled!";
+
+  bool const interpret_one_instruction = ctx->interpret_one_instruction;
+  while (true) {
+    dex_pc = inst->GetDexPc(insns);
+    shadow_frame.SetDexPC(dex_pc);
+    TraceExecution(shadow_frame, inst, dex_pc);
+    inst_data = inst->Fetch16(0);
+    {
+      bool exit_loop = false;
+      InstructionHandler<do_access_check, transaction_active> handler(
+          ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, exit_loop);
+      if (!handler.Preamble()) {
+        if (UNLIKELY(exit_loop)) {
+          return;
+        }
+        if (UNLIKELY(interpret_one_instruction)) {
+          break;
+        }
+        continue;
+      }
+    }
+    switch (inst->Opcode(inst_data)) {
+#define OPCODE_CASE(OPCODE, OPCODE_NAME, pname, f, i, a, e, v)                                    \
+      case OPCODE: {                                                                              \
+        bool exit_loop = false;                                                                   \
+        InstructionHandler<do_access_check, transaction_active> handler(                          \
+            ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, exit_loop);        \
+        handler.OPCODE_NAME();                                                                    \
+        /* TODO: Advance 'inst' here, instead of explicitly in each handler */                    \
+        if (UNLIKELY(exit_loop)) {                                                                \
+          return;                                                                                 \
+        }                                                                                         \
+        break;                                                                                    \
+      }
+DEX_INSTRUCTION_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+    }
+    if (UNLIKELY(interpret_one_instruction)) {
+      break;
+    }
+  }
+  // Record where we stopped.
+  shadow_frame.SetDexPC(inst->GetDexPc(insns));
+  ctx->result = ctx->result_register;
+  return;
+}  // NOLINT(readability/fn_size)
+
+}  // namespace interpreter
+}  // namespace art
+
+#endif  // ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
deleted file mode 100644
index 2762629..0000000
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ /dev/null
@@ -1,2517 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interpreter_switch_impl.h"
-
-#include "base/enums.h"
-#include "base/quasi_atomic.h"
-#include "dex/dex_file_types.h"
-#include "experimental_flags.h"
-#include "interpreter_common.h"
-#include "jit/jit.h"
-#include "jvalue-inl.h"
-#include "safe_math.h"
-#include "shadow_frame-inl.h"
-
-namespace art {
-namespace interpreter {
-
-#define HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instr)                                    \
-  do {                                                                                          \
-    DCHECK(self->IsExceptionPending());                                                         \
-    self->AllowThreadSuspension();                                                              \
-    if (!MoveToExceptionHandler(self, shadow_frame, instr)) {                                   \
-      /* Structured locking is to be enforced for abnormal termination, too. */                 \
-      DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);                        \
-      if (interpret_one_instruction) {                                                          \
-        /* Signal mterp to return to caller */                                                  \
-        shadow_frame.SetDexPC(dex::kDexNoIndex);                                                \
-      }                                                                                         \
-      ctx->result = JValue(); /* Handled in caller. */                                          \
-      return;                                                                                   \
-    } else {                                                                                    \
-      int32_t displacement =                                                                    \
-          static_cast<int32_t>(shadow_frame.GetDexPC()) - static_cast<int32_t>(dex_pc);         \
-      inst = inst->RelativeAt(displacement);                                                    \
-    }                                                                                           \
-  } while (false)
-
-#define HANDLE_PENDING_EXCEPTION() HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instrumentation)
-
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _next_function)  \
-  do {                                                                            \
-    if (UNLIKELY(_is_exception_pending)) {                                        \
-      HANDLE_PENDING_EXCEPTION();                                                 \
-    } else {                                                                      \
-      inst = inst->_next_function();                                              \
-    }                                                                             \
-  } while (false)
-
-#define HANDLE_MONITOR_CHECKS()                                                                   \
-  if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) {                       \
-    HANDLE_PENDING_EXCEPTION();                                                                   \
-  }
-
-// Code to run before each dex instruction.
-#define PREAMBLE_SAVE(save_ref)                                                                      \
-  {                                                                                             \
-    if (UNLIKELY(instrumentation->HasDexPcListeners()) &&                                       \
-        UNLIKELY(!DoDexPcMoveEvent(self,                                                        \
-                                   accessor,                                                    \
-                                   shadow_frame,                                                \
-                                   dex_pc,                                                      \
-                                   instrumentation,                                             \
-                                   save_ref))) {                                                \
-      HANDLE_PENDING_EXCEPTION();                                                               \
-      break;                                                                                    \
-    }                                                                                           \
-  }                                                                                             \
-  do {} while (false)
-
-#define PREAMBLE() PREAMBLE_SAVE(nullptr)
-
-#define BRANCH_INSTRUMENTATION(offset)                                                         \
-  do {                                                                                         \
-    if (UNLIKELY(instrumentation->HasBranchListeners())) {                                     \
-      instrumentation->Branch(self, shadow_frame.GetMethod(), dex_pc, offset);                 \
-    }                                                                                          \
-    JValue result;                                                                             \
-    if (jit::Jit::MaybeDoOnStackReplacement(self,                                              \
-                                            shadow_frame.GetMethod(),                          \
-                                            dex_pc,                                            \
-                                            offset,                                            \
-                                            &result)) {                                        \
-      if (interpret_one_instruction) {                                                         \
-        /* OSR has completed execution of the method.  Signal mterp to return to caller */     \
-        shadow_frame.SetDexPC(dex::kDexNoIndex);                                               \
-      }                                                                                        \
-      ctx->result = result;                                                                    \
-      return;                                                                                  \
-    }                                                                                          \
-  } while (false)
-
-#define HOTNESS_UPDATE()                                                                       \
-  do {                                                                                         \
-    if (jit != nullptr) {                                                                      \
-      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/ true);             \
-    }                                                                                          \
-  } while (false)
-
-#define HANDLE_ASYNC_EXCEPTION()                                                               \
-  if (UNLIKELY(self->ObserveAsyncException())) {                                               \
-    HANDLE_PENDING_EXCEPTION();                                                                \
-    break;                                                                                     \
-  }                                                                                            \
-  do {} while (false)
-
-#define HANDLE_BACKWARD_BRANCH(offset)                                                         \
-  do {                                                                                         \
-    if (IsBackwardBranch(offset)) {                                                            \
-      HOTNESS_UPDATE();                                                                        \
-      /* Record new dex pc early to have consistent suspend point at loop header. */           \
-      shadow_frame.SetDexPC(inst->GetDexPc(insns));                                            \
-      self->AllowThreadSuspension();                                                           \
-    }                                                                                          \
-  } while (false)
-
-// Unlike most other events the DexPcMovedEvent can be sent when there is a pending exception (if
-// the next instruction is MOVE_EXCEPTION). This means it needs to be handled carefully to be able
-// to detect exceptions thrown by the DexPcMovedEvent itself. These exceptions could be thrown by
-// jvmti-agents while handling breakpoint or single step events. We had to move this into its own
-// function because it was making ExecuteSwitchImpl have too large a stack.
-NO_INLINE static bool DoDexPcMoveEvent(Thread* self,
-                                       const CodeItemDataAccessor& accessor,
-                                       const ShadowFrame& shadow_frame,
-                                       uint32_t dex_pc,
-                                       const instrumentation::Instrumentation* instrumentation,
-                                       JValue* save_ref)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(instrumentation->HasDexPcListeners());
-  StackHandleScope<2> hs(self);
-  Handle<mirror::Throwable> thr(hs.NewHandle(self->GetException()));
-  mirror::Object* null_obj = nullptr;
-  HandleWrapper<mirror::Object> h(
-      hs.NewHandleWrapper(LIKELY(save_ref == nullptr) ? &null_obj : save_ref->GetGCRoot()));
-  self->ClearException();
-  instrumentation->DexPcMovedEvent(self,
-                                   shadow_frame.GetThisObject(accessor.InsSize()),
-                                   shadow_frame.GetMethod(),
-                                   dex_pc);
-  if (UNLIKELY(self->IsExceptionPending())) {
-    // We got a new exception in the dex-pc-moved event. We just let this exception replace the old
-    // one.
-    // TODO It would be good to add the old exception to the suppressed exceptions of the new one if
-    // possible.
-    return false;
-  } else {
-    if (UNLIKELY(!thr.IsNull())) {
-      self->SetException(thr.Get());
-    }
-    return true;
-  }
-}
-
-static bool NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
-}
-
-// Sends the normal method exit event. Returns true if the events succeeded and false if there is a
-// pending exception.
-NO_INLINE static bool SendMethodExitEvents(Thread* self,
-                                           const instrumentation::Instrumentation* instrumentation,
-                                           const ShadowFrame& frame,
-                                           ObjPtr<mirror::Object> thiz,
-                                           ArtMethod* method,
-                                           uint32_t dex_pc,
-                                           const JValue& result)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  bool had_event = false;
-  if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
-    had_event = true;
-    instrumentation->MethodExitEvent(self, thiz.Ptr(), method, dex_pc, result);
-  }
-  if (UNLIKELY(frame.NeedsNotifyPop() && instrumentation->HasWatchedFramePopListeners())) {
-    had_event = true;
-    instrumentation->WatchedFramePopped(self, frame);
-  }
-  if (UNLIKELY(had_event)) {
-    return !self->IsExceptionPending();
-  } else {
-    return true;
-  }
-}
-
-template<bool do_access_check, bool transaction_active>
-void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
-  Thread* self = ctx->self;
-  const CodeItemDataAccessor& accessor = ctx->accessor;
-  ShadowFrame& shadow_frame = ctx->shadow_frame;
-  JValue result_register = ctx->result_register;
-  bool interpret_one_instruction = ctx->interpret_one_instruction;
-  constexpr bool do_assignability_check = do_access_check;
-  if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
-    LOG(FATAL) << "Invalid shadow frame for interpreter use";
-    ctx->result = JValue();
-    return;
-  }
-  self->VerifyStack();
-
-  uint32_t dex_pc = shadow_frame.GetDexPC();
-  const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
-  const uint16_t* const insns = accessor.Insns();
-  const Instruction* inst = Instruction::At(insns + dex_pc);
-  uint16_t inst_data;
-  jit::Jit* jit = Runtime::Current()->GetJit();
-
-  do {
-    dex_pc = inst->GetDexPc(insns);
-    shadow_frame.SetDexPC(dex_pc);
-    TraceExecution(shadow_frame, inst, dex_pc);
-    inst_data = inst->Fetch16(0);
-    switch (inst->Opcode(inst_data)) {
-      case Instruction::NOP:
-        PREAMBLE();
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_FROM16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MOVE_16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_32x(),
-                             shadow_frame.GetVReg(inst->VRegB_32x()));
-        inst = inst->Next_3xx();
-        break;
-      case Instruction::MOVE_WIDE:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_WIDE_FROM16:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_22x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MOVE_WIDE_16:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_32x(),
-                                 shadow_frame.GetVRegLong(inst->VRegB_32x()));
-        inst = inst->Next_3xx();
-        break;
-      case Instruction::MOVE_OBJECT:
-        PREAMBLE();
-        shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
-                                      shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_OBJECT_FROM16:
-        PREAMBLE();
-        shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
-                                      shadow_frame.GetVRegReference(inst->VRegB_22x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MOVE_OBJECT_16:
-        PREAMBLE();
-        shadow_frame.SetVRegReference(inst->VRegA_32x(),
-                                      shadow_frame.GetVRegReference(inst->VRegB_32x()));
-        inst = inst->Next_3xx();
-        break;
-      case Instruction::MOVE_RESULT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_RESULT_WIDE:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_RESULT_OBJECT:
-        PREAMBLE_SAVE(&result_register);
-        shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::MOVE_EXCEPTION: {
-        PREAMBLE();
-        ObjPtr<mirror::Throwable> exception = self->GetException();
-        DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
-        shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
-        self->ClearException();
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::RETURN_VOID_NO_BARRIER: {
-        PREAMBLE();
-        JValue result;
-        self->AllowThreadSuspension();
-        HANDLE_MONITOR_CHECKS();
-        if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                     !SendMethodExitEvents(self,
-                                           instrumentation,
-                                           shadow_frame,
-                                           shadow_frame.GetThisObject(accessor.InsSize()),
-                                           shadow_frame.GetMethod(),
-                                           inst->GetDexPc(insns),
-                                           result))) {
-          HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
-        }
-        if (interpret_one_instruction) {
-          /* Signal mterp to return to caller */
-          shadow_frame.SetDexPC(dex::kDexNoIndex);
-        }
-        ctx->result = result;
-        return;
-      }
-      case Instruction::RETURN_VOID: {
-        PREAMBLE();
-        QuasiAtomic::ThreadFenceForConstructor();
-        JValue result;
-        self->AllowThreadSuspension();
-        HANDLE_MONITOR_CHECKS();
-        if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                     !SendMethodExitEvents(self,
-                                           instrumentation,
-                                           shadow_frame,
-                                           shadow_frame.GetThisObject(accessor.InsSize()),
-                                           shadow_frame.GetMethod(),
-                                           inst->GetDexPc(insns),
-                                           result))) {
-          HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
-        }
-        if (interpret_one_instruction) {
-          /* Signal mterp to return to caller */
-          shadow_frame.SetDexPC(dex::kDexNoIndex);
-        }
-        ctx->result = result;
-        return;
-      }
-      case Instruction::RETURN: {
-        PREAMBLE();
-        JValue result;
-        result.SetJ(0);
-        result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-        self->AllowThreadSuspension();
-        HANDLE_MONITOR_CHECKS();
-        if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                     !SendMethodExitEvents(self,
-                                           instrumentation,
-                                           shadow_frame,
-                                           shadow_frame.GetThisObject(accessor.InsSize()),
-                                           shadow_frame.GetMethod(),
-                                           inst->GetDexPc(insns),
-                                           result))) {
-          HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
-        }
-        if (interpret_one_instruction) {
-          /* Signal mterp to return to caller */
-          shadow_frame.SetDexPC(dex::kDexNoIndex);
-        }
-        ctx->result = result;
-        return;
-      }
-      case Instruction::RETURN_WIDE: {
-        PREAMBLE();
-        JValue result;
-        result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-        self->AllowThreadSuspension();
-        HANDLE_MONITOR_CHECKS();
-        if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                     !SendMethodExitEvents(self,
-                                           instrumentation,
-                                           shadow_frame,
-                                           shadow_frame.GetThisObject(accessor.InsSize()),
-                                           shadow_frame.GetMethod(),
-                                           inst->GetDexPc(insns),
-                                           result))) {
-          HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
-        }
-        if (interpret_one_instruction) {
-          /* Signal mterp to return to caller */
-          shadow_frame.SetDexPC(dex::kDexNoIndex);
-        }
-        ctx->result = result;
-        return;
-      }
-      case Instruction::RETURN_OBJECT: {
-        PREAMBLE();
-        JValue result;
-        self->AllowThreadSuspension();
-        HANDLE_MONITOR_CHECKS();
-        const size_t ref_idx = inst->VRegA_11x(inst_data);
-        ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
-        if (do_assignability_check && obj_result != nullptr) {
-          ObjPtr<mirror::Class> return_type = shadow_frame.GetMethod()->ResolveReturnType();
-          // Re-load since it might have moved.
-          obj_result = shadow_frame.GetVRegReference(ref_idx);
-          if (return_type == nullptr) {
-            // Return the pending exception.
-            HANDLE_PENDING_EXCEPTION();
-          }
-          if (!obj_result->VerifierInstanceOf(return_type)) {
-            // This should never happen.
-            std::string temp1, temp2;
-            self->ThrowNewExceptionF("Ljava/lang/InternalError;",
-                                     "Returning '%s' that is not instance of return type '%s'",
-                                     obj_result->GetClass()->GetDescriptor(&temp1),
-                                     return_type->GetDescriptor(&temp2));
-            HANDLE_PENDING_EXCEPTION();
-          }
-        }
-        result.SetL(obj_result);
-        if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                     !SendMethodExitEvents(self,
-                                           instrumentation,
-                                           shadow_frame,
-                                           shadow_frame.GetThisObject(accessor.InsSize()),
-                                           shadow_frame.GetMethod(),
-                                           inst->GetDexPc(insns),
-                                           result))) {
-          HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
-        }
-        // Re-load since it might have moved during the MethodExitEvent.
-        result.SetL(shadow_frame.GetVRegReference(ref_idx));
-        if (interpret_one_instruction) {
-          /* Signal mterp to return to caller */
-          shadow_frame.SetDexPC(dex::kDexNoIndex);
-        }
-        ctx->result = result;
-        return;
-      }
-      case Instruction::CONST_4: {
-        PREAMBLE();
-        uint4_t dst = inst->VRegA_11n(inst_data);
-        int4_t val = inst->VRegB_11n(inst_data);
-        shadow_frame.SetVReg(dst, val);
-        if (val == 0) {
-          shadow_frame.SetVRegReference(dst, nullptr);
-        }
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::CONST_16: {
-        PREAMBLE();
-        uint8_t dst = inst->VRegA_21s(inst_data);
-        int16_t val = inst->VRegB_21s();
-        shadow_frame.SetVReg(dst, val);
-        if (val == 0) {
-          shadow_frame.SetVRegReference(dst, nullptr);
-        }
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::CONST: {
-        PREAMBLE();
-        uint8_t dst = inst->VRegA_31i(inst_data);
-        int32_t val = inst->VRegB_31i();
-        shadow_frame.SetVReg(dst, val);
-        if (val == 0) {
-          shadow_frame.SetVRegReference(dst, nullptr);
-        }
-        inst = inst->Next_3xx();
-        break;
-      }
-      case Instruction::CONST_HIGH16: {
-        PREAMBLE();
-        uint8_t dst = inst->VRegA_21h(inst_data);
-        int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
-        shadow_frame.SetVReg(dst, val);
-        if (val == 0) {
-          shadow_frame.SetVRegReference(dst, nullptr);
-        }
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::CONST_WIDE_16:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::CONST_WIDE_32:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
-        inst = inst->Next_3xx();
-        break;
-      case Instruction::CONST_WIDE:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
-        inst = inst->Next_51l();
-        break;
-      case Instruction::CONST_WIDE_HIGH16:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
-                                 static_cast<uint64_t>(inst->VRegB_21h()) << 48);
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::CONST_STRING: {
-        PREAMBLE();
-        ObjPtr<mirror::String> s = ResolveString(self,
-                                                 shadow_frame,
-                                                 dex::StringIndex(inst->VRegB_21c()));
-        if (UNLIKELY(s == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::CONST_STRING_JUMBO: {
-        PREAMBLE();
-        ObjPtr<mirror::String> s = ResolveString(self,
-                                                 shadow_frame,
-                                                 dex::StringIndex(inst->VRegB_31c()));
-        if (UNLIKELY(s == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
-          inst = inst->Next_3xx();
-        }
-        break;
-      }
-      case Instruction::CONST_CLASS: {
-        PREAMBLE();
-        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
-                                                         shadow_frame.GetMethod(),
-                                                         self,
-                                                         false,
-                                                         do_access_check);
-        if (UNLIKELY(c == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::CONST_METHOD_HANDLE: {
-        PREAMBLE();
-        ClassLinker* cl = Runtime::Current()->GetClassLinker();
-        ObjPtr<mirror::MethodHandle> mh = cl->ResolveMethodHandle(self,
-                                                                  inst->VRegB_21c(),
-                                                                  shadow_frame.GetMethod());
-        if (UNLIKELY(mh == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mh);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::CONST_METHOD_TYPE: {
-        PREAMBLE();
-        ClassLinker* cl = Runtime::Current()->GetClassLinker();
-        ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
-                                                              dex::ProtoIndex(inst->VRegB_21c()),
-                                                              shadow_frame.GetMethod());
-        if (UNLIKELY(mt == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mt);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::MONITOR_ENTER: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(obj == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
-          POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
-        }
-        break;
-      }
-      case Instruction::MONITOR_EXIT: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(obj == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
-          POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
-        }
-        break;
-      }
-      case Instruction::CHECK_CAST: {
-        PREAMBLE();
-        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
-                                                         shadow_frame.GetMethod(),
-                                                         self,
-                                                         false,
-                                                         do_access_check);
-        if (UNLIKELY(c == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
-          if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
-            ThrowClassCastException(c, obj->GetClass());
-            HANDLE_PENDING_EXCEPTION();
-          } else {
-            inst = inst->Next_2xx();
-          }
-        }
-        break;
-      }
-      case Instruction::INSTANCE_OF: {
-        PREAMBLE();
-        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegC_22c()),
-                                                         shadow_frame.GetMethod(),
-                                                         self,
-                                                         false,
-                                                         do_access_check);
-        if (UNLIKELY(c == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-          shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
-                               (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::ARRAY_LENGTH:  {
-        PREAMBLE();
-        ObjPtr<mirror::Object> array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
-        if (UNLIKELY(array == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
-          inst = inst->Next_1xx();
-        }
-        break;
-      }
-      case Instruction::NEW_INSTANCE: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> obj = nullptr;
-        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
-                                                         shadow_frame.GetMethod(),
-                                                         self,
-                                                         false,
-                                                         do_access_check);
-        if (LIKELY(c != nullptr)) {
-          if (UNLIKELY(c->IsStringClass())) {
-            gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-            obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
-          } else {
-            obj = AllocObjectFromCode<true>(
-                c.Ptr(),
-                self,
-                Runtime::Current()->GetHeap()->GetCurrentAllocator());
-          }
-        }
-        if (UNLIKELY(obj == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          obj->GetClass()->AssertInitializedOrInitializingInThread(self);
-          // Don't allow finalizable objects to be allocated during a transaction since these can't
-          // be finalized without a started runtime.
-          if (transaction_active && obj->GetClass()->IsFinalizable()) {
-            AbortTransactionF(self, "Allocating finalizable object in transaction: %s",
-                              obj->PrettyTypeOf().c_str());
-            HANDLE_PENDING_EXCEPTION();
-            break;
-          }
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::NEW_ARRAY: {
-        PREAMBLE();
-        int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
-        ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check, true>(
-            dex::TypeIndex(inst->VRegC_22c()),
-            length,
-            shadow_frame.GetMethod(),
-            self,
-            Runtime::Current()->GetHeap()->GetCurrentAllocator());
-        if (UNLIKELY(obj == nullptr)) {
-          HANDLE_PENDING_EXCEPTION();
-        } else {
-          shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::FILLED_NEW_ARRAY: {
-        PREAMBLE();
-        bool success =
-            DoFilledNewArray<false, do_access_check, transaction_active>(inst, shadow_frame, self,
-                                                                         &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::FILLED_NEW_ARRAY_RANGE: {
-        PREAMBLE();
-        bool success =
-            DoFilledNewArray<true, do_access_check, transaction_active>(inst, shadow_frame,
-                                                                        self, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::FILL_ARRAY_DATA: {
-        PREAMBLE();
-        const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
-        const Instruction::ArrayDataPayload* payload =
-            reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
-        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
-        bool success = FillArrayData(obj, payload);
-        if (!success) {
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        if (transaction_active) {
-          RecordArrayElementsInTransaction(obj->AsArray(), payload->element_count);
-        }
-        inst = inst->Next_3xx();
-        break;
-      }
-      case Instruction::THROW: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        ObjPtr<mirror::Object> exception =
-            shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(exception == nullptr)) {
-          ThrowNullPointerException("throw with null exception");
-        } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
-          // This should never happen.
-          std::string temp;
-          self->ThrowNewExceptionF("Ljava/lang/InternalError;",
-                                   "Throwing '%s' that is not instance of Throwable",
-                                   exception->GetClass()->GetDescriptor(&temp));
-        } else {
-          self->SetException(exception->AsThrowable());
-        }
-        HANDLE_PENDING_EXCEPTION();
-        break;
-      }
-      case Instruction::GOTO: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        int8_t offset = inst->VRegA_10t(inst_data);
-        BRANCH_INSTRUMENTATION(offset);
-        inst = inst->RelativeAt(offset);
-        HANDLE_BACKWARD_BRANCH(offset);
-        break;
-      }
-      case Instruction::GOTO_16: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        int16_t offset = inst->VRegA_20t();
-        BRANCH_INSTRUMENTATION(offset);
-        inst = inst->RelativeAt(offset);
-        HANDLE_BACKWARD_BRANCH(offset);
-        break;
-      }
-      case Instruction::GOTO_32: {
-        PREAMBLE();
-        HANDLE_ASYNC_EXCEPTION();
-        int32_t offset = inst->VRegA_30t();
-        BRANCH_INSTRUMENTATION(offset);
-        inst = inst->RelativeAt(offset);
-        HANDLE_BACKWARD_BRANCH(offset);
-        break;
-      }
-      case Instruction::PACKED_SWITCH: {
-        PREAMBLE();
-        int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
-        BRANCH_INSTRUMENTATION(offset);
-        inst = inst->RelativeAt(offset);
-        HANDLE_BACKWARD_BRANCH(offset);
-        break;
-      }
-      case Instruction::SPARSE_SWITCH: {
-        PREAMBLE();
-        int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
-        BRANCH_INSTRUMENTATION(offset);
-        inst = inst->RelativeAt(offset);
-        HANDLE_BACKWARD_BRANCH(offset);
-        break;
-      }
-
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wfloat-equal"
-
-      case Instruction::CMPL_FLOAT: {
-        PREAMBLE();
-        float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
-        float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
-        int32_t result;
-        if (val1 > val2) {
-          result = 1;
-        } else if (val1 == val2) {
-          result = 0;
-        } else {
-          result = -1;
-        }
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::CMPG_FLOAT: {
-        PREAMBLE();
-        float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
-        float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
-        int32_t result;
-        if (val1 < val2) {
-          result = -1;
-        } else if (val1 == val2) {
-          result = 0;
-        } else {
-          result = 1;
-        }
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::CMPL_DOUBLE: {
-        PREAMBLE();
-        double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
-        double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
-        int32_t result;
-        if (val1 > val2) {
-          result = 1;
-        } else if (val1 == val2) {
-          result = 0;
-        } else {
-          result = -1;
-        }
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-        inst = inst->Next_2xx();
-        break;
-      }
-
-      case Instruction::CMPG_DOUBLE: {
-        PREAMBLE();
-        double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
-        double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
-        int32_t result;
-        if (val1 < val2) {
-          result = -1;
-        } else if (val1 == val2) {
-          result = 0;
-        } else {
-          result = 1;
-        }
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-        inst = inst->Next_2xx();
-        break;
-      }
-
-#pragma clang diagnostic pop
-
-      case Instruction::CMP_LONG: {
-        PREAMBLE();
-        int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
-        int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
-        int32_t result;
-        if (val1 > val2) {
-          result = 1;
-        } else if (val1 == val2) {
-          result = 0;
-        } else {
-          result = -1;
-        }
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::IF_EQ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
-            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_NE: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
-            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_LT: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
-            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_GE: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
-            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_GT: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_LE: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
-            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-          int16_t offset = inst->VRegC_22t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_EQZ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_NEZ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_LTZ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_GEZ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_GTZ: {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::IF_LEZ:  {
-        PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
-          int16_t offset = inst->VRegB_21t();
-          BRANCH_INSTRUMENTATION(offset);
-          inst = inst->RelativeAt(offset);
-          HANDLE_BACKWARD_BRANCH(offset);
-        } else {
-          BRANCH_INSTRUMENTATION(2);
-          inst = inst->Next_2xx();
-        }
-        break;
-      }
-      case Instruction::AGET_BOOLEAN: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET_BYTE: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::ByteArray> array = a->AsByteArray();
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET_CHAR: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::CharArray> array = a->AsCharArray();
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET_SHORT: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::ShortArray> array = a->AsShortArray();
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-        ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET_WIDE:  {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-        ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::AGET_OBJECT: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
-        if (array->CheckIsValidIndex(index)) {
-          shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_BOOLEAN: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_BYTE: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::ByteArray> array = a->AsByteArray();
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_CHAR: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::CharArray> array = a->AsCharArray();
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_SHORT: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::ShortArray> array = a->AsShortArray();
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-        ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_WIDE: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-        ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
-        if (array->CheckIsValidIndex(index)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::APUT_OBJECT: {
-        PREAMBLE();
-        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == nullptr)) {
-          ThrowNullPointerExceptionFromInterpreter();
-          HANDLE_PENDING_EXCEPTION();
-          break;
-        }
-        int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjPtr<mirror::Object> val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
-        ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
-        if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
-          array->SetWithoutChecks<transaction_active>(index, val);
-          inst = inst->Next_2xx();
-        } else {
-          HANDLE_PENDING_EXCEPTION();
-        }
-        break;
-      }
-      case Instruction::IGET_BOOLEAN: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_BYTE: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_CHAR: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_SHORT: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_WIDE: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_OBJECT: {
-        PREAMBLE();
-        bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
-            self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_WIDE_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_OBJECT_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_BOOLEAN_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_BYTE_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_CHAR_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IGET_SHORT_QUICK: {
-        PREAMBLE();
-        bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_BOOLEAN: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_BYTE: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_CHAR: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_SHORT: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_WIDE: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SGET_OBJECT: {
-        PREAMBLE();
-        bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_BOOLEAN: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_BYTE: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_CHAR: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_SHORT: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_WIDE: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_OBJECT: {
-        PREAMBLE();
-        bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_BOOLEAN_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_BYTE_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_CHAR_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_SHORT_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_WIDE_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::IPUT_OBJECT_QUICK: {
-        PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
-            shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_BOOLEAN: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_BYTE: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_CHAR: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_SHORT: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_WIDE: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SPUT_OBJECT: {
-        PREAMBLE();
-        bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
-            transaction_active>(self, shadow_frame, inst, inst_data);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::INVOKE_VIRTUAL: {
-        PREAMBLE();
-        bool success = DoInvoke<kVirtual, false, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_VIRTUAL_RANGE: {
-        PREAMBLE();
-        bool success = DoInvoke<kVirtual, true, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_SUPER: {
-        PREAMBLE();
-        bool success = DoInvoke<kSuper, false, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_SUPER_RANGE: {
-        PREAMBLE();
-        bool success = DoInvoke<kSuper, true, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_DIRECT: {
-        PREAMBLE();
-        bool success = DoInvoke<kDirect, false, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_DIRECT_RANGE: {
-        PREAMBLE();
-        bool success = DoInvoke<kDirect, true, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_INTERFACE: {
-        PREAMBLE();
-        bool success = DoInvoke<kInterface, false, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_INTERFACE_RANGE: {
-        PREAMBLE();
-        bool success = DoInvoke<kInterface, true, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_STATIC: {
-        PREAMBLE();
-        bool success = DoInvoke<kStatic, false, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_STATIC_RANGE: {
-        PREAMBLE();
-        bool success = DoInvoke<kStatic, true, do_access_check>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_VIRTUAL_QUICK: {
-        PREAMBLE();
-        bool success = DoInvokeVirtualQuick<false>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
-        PREAMBLE();
-        bool success = DoInvokeVirtualQuick<true>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_POLYMORPHIC: {
-        PREAMBLE();
-        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokePolymorphic<false /* is_range */>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
-        break;
-      }
-      case Instruction::INVOKE_POLYMORPHIC_RANGE: {
-        PREAMBLE();
-        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokePolymorphic<true /* is_range */>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
-        break;
-      }
-      case Instruction::INVOKE_CUSTOM: {
-        PREAMBLE();
-        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokeCustom<false /* is_range */>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::INVOKE_CUSTOM_RANGE: {
-        PREAMBLE();
-        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokeCustom<true /* is_range */>(
-            self, shadow_frame, inst, inst_data, &result_register);
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
-        break;
-      }
-      case Instruction::NEG_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(
-            inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::NOT_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(
-            inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::NEG_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(
-            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::NOT_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(
-            inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::NEG_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(
-            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::NEG_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(
-            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
-                                 shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                                  shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                                   shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::LONG_TO_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::LONG_TO_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                                  shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::LONG_TO_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                                   shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::FLOAT_TO_INT: {
-        PREAMBLE();
-        float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
-        int32_t result = art_float_to_integral<int32_t, float>(val);
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::FLOAT_TO_LONG: {
-        PREAMBLE();
-        float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
-        int64_t result = art_float_to_integral<int64_t, float>(val);
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::FLOAT_TO_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                                   shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::DOUBLE_TO_INT: {
-        PREAMBLE();
-        double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
-        int32_t result = art_float_to_integral<int32_t, double>(val);
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DOUBLE_TO_LONG: {
-        PREAMBLE();
-        double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
-        int64_t result = art_float_to_integral<int64_t, double>(val);
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DOUBLE_TO_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                                  shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_BYTE:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
-            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_CHAR:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
-            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::INT_TO_SHORT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
-            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      case Instruction::ADD_INT: {
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                     shadow_frame.GetVReg(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      }
-      case Instruction::SUB_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                     shadow_frame.GetVReg(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                     shadow_frame.GetVReg(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_INT: {
-        PREAMBLE();
-        bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
-                                   shadow_frame.GetVReg(inst->VRegB_23x()),
-                                   shadow_frame.GetVReg(inst->VRegC_23x()));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::REM_INT: {
-        PREAMBLE();
-        bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
-                                      shadow_frame.GetVReg(inst->VRegB_23x()),
-                                      shadow_frame.GetVReg(inst->VRegC_23x()));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::SHL_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_23x()) <<
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SHR_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_23x()) >>
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::USHR_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::AND_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_23x()) &
-                             shadow_frame.GetVReg(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::OR_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_23x()) |
-                             shadow_frame.GetVReg(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::XOR_INT:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_23x()) ^
-                             shadow_frame.GetVReg(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::ADD_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                         shadow_frame.GetVRegLong(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SUB_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                         shadow_frame.GetVRegLong(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                         shadow_frame.GetVRegLong(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_LONG:
-        PREAMBLE();
-        DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
-                     shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                     shadow_frame.GetVRegLong(inst->VRegC_23x()));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
-        break;
-      case Instruction::REM_LONG:
-        PREAMBLE();
-        DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
-                        shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                        shadow_frame.GetVRegLong(inst->VRegC_23x()));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
-        break;
-      case Instruction::AND_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_23x()) &
-                                 shadow_frame.GetVRegLong(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::OR_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_23x()) |
-                                 shadow_frame.GetVRegLong(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::XOR_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
-                                 shadow_frame.GetVRegLong(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SHL_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
-                                 (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SHR_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
-                                 (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::USHR_LONG:
-        PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                                 static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
-                                 (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::ADD_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                                  shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
-                                  shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SUB_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                                  shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
-                                  shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                                  shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
-                                  shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                                  shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
-                                  shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::REM_FLOAT:
-        PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                                  fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
-                                        shadow_frame.GetVRegFloat(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::ADD_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                                   shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
-                                   shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SUB_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                                   shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
-                                   shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                                   shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
-                                   shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                                   shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
-                                   shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::REM_DOUBLE:
-        PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                                   fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
-                                        shadow_frame.GetVRegDouble(inst->VRegC_23x())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::ADD_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
-                                            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SUB_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             SafeSub(shadow_frame.GetVReg(vregA),
-                                     shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::MUL_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             SafeMul(shadow_frame.GetVReg(vregA),
-                                     shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DIV_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
-                                   shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
-        break;
-      }
-      case Instruction::REM_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
-                                      shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
-        break;
-      }
-      case Instruction::SHL_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             shadow_frame.GetVReg(vregA) <<
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SHR_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             shadow_frame.GetVReg(vregA) >>
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::USHR_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::AND_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             shadow_frame.GetVReg(vregA) &
-                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::OR_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             shadow_frame.GetVReg(vregA) |
-                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::XOR_INT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVReg(vregA,
-                             shadow_frame.GetVReg(vregA) ^
-                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::ADD_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 SafeAdd(shadow_frame.GetVRegLong(vregA),
-                                         shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SUB_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 SafeSub(shadow_frame.GetVRegLong(vregA),
-                                         shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::MUL_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 SafeMul(shadow_frame.GetVRegLong(vregA),
-                                         shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DIV_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
-                    shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
-        break;
-      }
-      case Instruction::REM_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
-                        shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
-        break;
-      }
-      case Instruction::AND_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 shadow_frame.GetVRegLong(vregA) &
-                                 shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::OR_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 shadow_frame.GetVRegLong(vregA) |
-                                 shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::XOR_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 shadow_frame.GetVRegLong(vregA) ^
-                                 shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SHL_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 shadow_frame.GetVRegLong(vregA) <<
-                                 (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SHR_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 shadow_frame.GetVRegLong(vregA) >>
-                                 (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::USHR_LONG_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegLong(vregA,
-                                 static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
-                                 (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::ADD_FLOAT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegFloat(vregA,
-                                  shadow_frame.GetVRegFloat(vregA) +
-                                  shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SUB_FLOAT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegFloat(vregA,
-                                  shadow_frame.GetVRegFloat(vregA) -
-                                  shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::MUL_FLOAT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegFloat(vregA,
-                                  shadow_frame.GetVRegFloat(vregA) *
-                                  shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DIV_FLOAT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegFloat(vregA,
-                                  shadow_frame.GetVRegFloat(vregA) /
-                                  shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::REM_FLOAT_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegFloat(vregA,
-                                  fmodf(shadow_frame.GetVRegFloat(vregA),
-                                        shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::ADD_DOUBLE_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegDouble(vregA,
-                                   shadow_frame.GetVRegDouble(vregA) +
-                                   shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::SUB_DOUBLE_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegDouble(vregA,
-                                   shadow_frame.GetVRegDouble(vregA) -
-                                   shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::MUL_DOUBLE_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegDouble(vregA,
-                                   shadow_frame.GetVRegDouble(vregA) *
-                                   shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::DIV_DOUBLE_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegDouble(vregA,
-                                   shadow_frame.GetVRegDouble(vregA) /
-                                   shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::REM_DOUBLE_2ADDR: {
-        PREAMBLE();
-        uint4_t vregA = inst->VRegA_12x(inst_data);
-        shadow_frame.SetVRegDouble(vregA,
-                                   fmod(shadow_frame.GetVRegDouble(vregA),
-                                        shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
-        inst = inst->Next_1xx();
-        break;
-      }
-      case Instruction::ADD_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                     inst->VRegC_22s()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::RSUB_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             SafeSub(inst->VRegC_22s(),
-                                     shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                     inst->VRegC_22s()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_INT_LIT16: {
-        PREAMBLE();
-        bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
-                                   shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                   inst->VRegC_22s());
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::REM_INT_LIT16: {
-        PREAMBLE();
-        bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
-                                      shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                      inst->VRegC_22s());
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::AND_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
-                             inst->VRegC_22s());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::OR_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
-                             inst->VRegC_22s());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::XOR_INT_LIT16:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
-                             inst->VRegC_22s());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::ADD_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::RSUB_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(inst->VRegB_22b())));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::MUL_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::DIV_INT_LIT8: {
-        PREAMBLE();
-        bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
-                                   shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::REM_INT_LIT8: {
-        PREAMBLE();
-        bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
-                                      shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
-        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
-        break;
-      }
-      case Instruction::AND_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22b()) &
-                             inst->VRegC_22b());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::OR_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22b()) |
-                             inst->VRegC_22b());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::XOR_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22b()) ^
-                             inst->VRegC_22b());
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SHL_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22b()) <<
-                             (inst->VRegC_22b() & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::SHR_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_22b()) >>
-                             (inst->VRegC_22b() & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::USHR_INT_LIT8:
-        PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                             static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
-                             (inst->VRegC_22b() & 0x1f));
-        inst = inst->Next_2xx();
-        break;
-      case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
-      case Instruction::UNUSED_79 ... Instruction::UNUSED_7A:
-      case Instruction::UNUSED_F3 ... Instruction::UNUSED_F9:
-        UnexpectedOpcode(inst, shadow_frame);
-    }
-  } while (!interpret_one_instruction);
-  // Record where we stopped.
-  shadow_frame.SetDexPC(inst->GetDexPc(insns));
-  ctx->result = result_register;
-  return;
-}  // NOLINT(readability/fn_size)
-
-// Explicit definitions of ExecuteSwitchImplCpp.
-template HOT_ATTR
-void ExecuteSwitchImplCpp<true, false>(SwitchImplContext* ctx);
-template HOT_ATTR
-void ExecuteSwitchImplCpp<false, false>(SwitchImplContext* ctx);
-template
-void ExecuteSwitchImplCpp<true, true>(SwitchImplContext* ctx);
-template
-void ExecuteSwitchImplCpp<false, true>(SwitchImplContext* ctx);
-
-}  // namespace interpreter
-}  // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 9fc4239..d4dca11 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "dex/code_item_accessors.h"
 #include "jvalue.h"
diff --git a/runtime/interpreter/interpreter_switch_impl0.cc b/runtime/interpreter/interpreter_switch_impl0.cc
new file mode 100644
index 0000000..00159ec
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl0.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template HOT_ATTR
+void ExecuteSwitchImplCpp<false, false>(SwitchImplContext* ctx);
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl1.cc b/runtime/interpreter/interpreter_switch_impl1.cc
new file mode 100644
index 0000000..3a86765
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl1.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template
+void ExecuteSwitchImplCpp<false, true>(SwitchImplContext* ctx);
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl2.cc b/runtime/interpreter/interpreter_switch_impl2.cc
new file mode 100644
index 0000000..c2739c1
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl2.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template HOT_ATTR
+void ExecuteSwitchImplCpp<true, false>(SwitchImplContext* ctx);
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl3.cc b/runtime/interpreter/interpreter_switch_impl3.cc
new file mode 100644
index 0000000..808e4bc
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl3.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template
+void ExecuteSwitchImplCpp<true, true>(SwitchImplContext* ctx);
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/lock_count_data.h b/runtime/interpreter/lock_count_data.h
index 3098d4f..efa14c5 100644
--- a/runtime/interpreter/lock_count_data.h
+++ b/runtime/interpreter/lock_count_data.h
@@ -20,7 +20,7 @@
 #include <memory>
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 
 namespace art {
 
diff --git a/runtime/interpreter/mterp/Makefile_mterp b/runtime/interpreter/mterp/Makefile_mterp
deleted file mode 100644
index ac8da69..0000000
--- a/runtime/interpreter/mterp/Makefile_mterp
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Makefile for the Art fast interpreter.  This is not currently
-# integrated into the build system.
-#
-
-SHELL := /bin/sh
-
-# Build system has TARGET_ARCH=arm, but we can support the exact architecture
-# if it is worthwhile.
-#
-# To generate sources:
-# for arch in arm arm64 x86 x86_64 mips mips64
-# do
-#   TARGET_ARCH_EXT=$arch make -f Makefile_mterp
-# done
-#
-
-OUTPUT_DIR := out
-
-# Accumulate all possible dependencies for the generated files in a very
-# conservative fashion.  If it's not one of the generated files in "out",
-# assume it's a dependency.
-SOURCE_DEPS := \
-	$(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print) \
-
-# Source files generated by the script.  There's always one C and one
-# assembly file, though in practice one or the other could be empty.
-GEN_SOURCES := \
-	$(OUTPUT_DIR)/interp_asm_$(TARGET_ARCH_EXT).S
-
-target: $(GEN_SOURCES)
-
-$(GEN_SOURCES): $(SOURCE_DEPS)
-	@mkdir -p out
-	./gen_mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)
diff --git a/runtime/interpreter/mterp/README.txt b/runtime/interpreter/mterp/README.txt
index 19e02be..54bb634 100644
--- a/runtime/interpreter/mterp/README.txt
+++ b/runtime/interpreter/mterp/README.txt
@@ -1,108 +1,29 @@
-rt "mterp" README
-
-NOTE: Find rebuilding instructions at the bottom of this file.
-
-
 ==== Overview ====
 
-Every configuration has a "config-*" file that controls how the sources
-are generated.  The sources are written into the "out" directory, where
+The assembly source code is produced from custom python-based templates.
+All the architecture-specific template files are concatenated to create
+one big python script. This generated python script is then executed to
+produce the final assembly file. The template syntax is:
+ * Lines starting with % are python code. They will be copied as-is to
+   the script (without the %) and thus executed during the generation.
+ * Other lines are text, and they are essentially syntax sugar for
+   out.write('''(line text)''') and thus they write the main output.
+ * Within a text line, $ can be used to insert variables from code.
+
+The final assembly sources are written into the "out" directory, where
 they are picked up by the Android build system.
 
 The best way to become familiar with the interpreter is to look at the
 generated files in the "out" directory.
 
 
-==== Config file format ====
-
-The config files are parsed from top to bottom.  Each line in the file
-may be blank, hold a comment (line starts with '#'), or be a command.
-
-The commands are:
-
-  handler-style <computed-goto|jump-table>
-
-    Specify which style of interpreter to generate.  In computed-goto,
-    each handler is allocated a fixed region, allowing transitions to
-    be done via table-start-address + (opcode * handler-size). With
-    jump-table style, handlers may be of any length, and the generated
-    table is an array of pointers to the handlers.  This command is required,
-    and must be the first command in the config file.
-
-  handler-size <bytes>
-
-    Specify the size of the fixed region, in bytes.  On most platforms
-    this will need to be a power of 2.  For jump-table implementations,
-    this command is ignored.
-
-  import <filename>
-
-    The specified file is included immediately, in its entirety.  No
-    substitutions are performed.  ".cpp" and ".h" files are copied to the
-    C output, ".S" files are copied to the asm output.
-
-  asm-alt-stub <filename>
-
-    When present, this command will cause the generation of an alternate
-    set of entry points (for computed-goto interpreters) or an alternate
-    jump table (for jump-table interpreters).
-
-  fallback-stub <filename>
-
-    Specifies a file to be used for the special FALLBACK tag on the "op"
-    command below.  Intended to be used to transfer control to an alternate
-    interpreter to single-step a not-yet-implemented opcode.  Note: should
-    note be used on RETURN-class instructions.
-
-  op-start <directory>
-
-    Indicates the start of the opcode list.  Must precede any "op"
-    commands.  The specified directory is the default location to pull
-    instruction files from.
-
-  op <opcode> <directory>|FALLBACK
-
-    Can only appear after "op-start" and before "op-end".  Overrides the
-    default source file location of the specified opcode.  The opcode
-    definition will come from the specified file, e.g. "op OP_NOP arm"
-    will load from "arm/OP_NOP.S".  A substitution dictionary will be
-    applied (see below).  If the special "FALLBACK" token is used instead of
-    a directory name, the source file specified in fallback-stub will instead
-    be used for this opcode.
-
-  alt <opcode> <directory>
-
-    Can only appear after "op-start" and before "op-end".  Similar to the
-    "op" command above, but denotes a source file to override the entry
-    in the alternate handler table.  The opcode definition will come from
-    the specified file, e.g. "alt OP_NOP arm" will load from
-    "arm/ALT_OP_NOP.S".  A substitution dictionary will be applied
-    (see below).
-
-  op-end
-
-    Indicates the end of the opcode list.  All kNumPackedOpcodes
-    opcodes are emitted when this is seen, followed by any code that
-    didn't fit inside the fixed-size instruction handler space.
-
-The order of "op" and "alt" directives are not significant; the generation
-tool will extract ordering info from the VM sources.
-
-Typically the form in which most opcodes currently exist is used in
-the "op-start" directive.
-
 ==== Instruction file format ====
 
 The assembly instruction files are simply fragments of assembly sources.
 The starting label will be provided by the generation tool, as will
-declarations for the segment type and alignment.  The expected target
-assembler is GNU "as", but others will work (may require fiddling with
-some of the pseudo-ops emitted by the generation tool).
+declarations for the segment type and alignment.
 
-A substitution dictionary is applied to all opcode fragments as they are
-appended to the output.  Substitutions can look like "$value" or "${value}".
-
-The dictionary always includes:
+The following global variables are generally available:
 
   $opcode - opcode name, e.g. "OP_NOP"
   $opnum - opcode number, e.g. 0 for OP_NOP
@@ -113,29 +34,6 @@
 so you can take advantage of C-style comments and preprocessor directives
 like "#define".
 
-Some generator operations are available.
-
-  %include "filename" [subst-dict]
-
-    Includes the file, which should look like "arm/OP_NOP.S".  You can
-    specify values for the substitution dictionary, using standard Python
-    syntax.  For example, this:
-      %include "arm/unop.S" {"result":"r1"}
-    would insert "arm/unop.S" at the current file position, replacing
-    occurrences of "$result" with "r1".
-
-  %default <subst-dict>
-
-    Specify default substitution dictionary values, using standard Python
-    syntax.  Useful if you want to have a "base" version and variants.
-
-  %break
-
-    Identifies the split between the main portion of the instruction
-    handler (which must fit in "handler-size" bytes) and the "sister"
-    code, which is appended to the end of the instruction handler block.
-    In jump table implementations, %break is ignored.
-
 The generation tool does *not* print a warning if your instructions
 exceed "handler-size", but the VM will abort on startup if it detects an
 oversized handler.  On architectures with fixed-width instructions this
@@ -153,20 +51,6 @@
 message and abort during startup.
 
 
-==== Development tips ====
-
-If you need to debug the initial piece of an opcode handler, and your
-debug code expands it beyond the handler size limit, you can insert a
-generic header at the top:
-
-    b       ${opcode}_start
-%break
-${opcode}_start:
-
-If you already have a %break, it's okay to leave it in place -- the second
-%break is ignored.
-
-
 ==== Rebuilding ====
 
 If you change any of the source file fragments, you need to rebuild the
@@ -174,7 +58,7 @@
 "out" are editable, then:
 
     $ cd mterp
-    $ ./rebuild.sh
+    $ ./gen_mterp.py
 
 The ultimate goal is to have the build system generate the necessary
 output files without requiring this separate step, but we're not yet
diff --git a/runtime/interpreter/mterp/arm/alt_stub.S b/runtime/interpreter/mterp/arm/alt_stub.S
deleted file mode 100644
index 8799d95..0000000
--- a/runtime/interpreter/mterp/arm/alt_stub.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_${opcode}
-    sub    lr, lr, #(.L_ALT_${opcode} - .L_${opcode})               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
new file mode 100644
index 0000000..a6ba454
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/arithmetic.S
@@ -0,0 +1,975 @@
+%def binop(preinstr="", result="r0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG r1, r3                     @ r1<- vCC
+    GET_VREG r0, r2                     @ r0<- vBB
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG $result, r9                @ vAA<- $result
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 11-14 instructions */
+
+%def binop2addr(preinstr="", result="r0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r1, r3                     @ r1<- vB
+    GET_VREG r0, r9                     @ r0<- vA
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG $result, r9                @ vAA<- $result
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+%def binopLit16(result="r0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r0, r2                     @ r0<- vB
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG $result, r9                @ vAA<- $result
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+%def binopLit8(extract="asr     r1, r3, #8", result="r0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * You can override "extract" if the extraction of the literal value
+     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+     * can be omitted completely if the shift is embedded in "instr".
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG r0, r2                     @ r0<- vBB
+    $extract                            @ optional; typically r1<- ssssssCC (sign extended)
+    .if $chkzero
+    @cmp     r1, #0                     @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG $result, r9                @ vAA<- $result
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-12 instructions */
+
+%def binopWide(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r2    @ r0/r1<- vBB/vBB+1
+    GET_VREG_WIDE_BY_ADDR r2, r3, r3    @ r2/r3<- vCC/vCC+1
+    .if $chkzero
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR $result0,$result1,r9  @ vAA/vAA+1<- $result0/$result1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 14-17 instructions */
+
+%def binopWide2addr(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
+    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+    GET_VREG_WIDE_BY_ADDR r2, r3, r1    @ r2/r3<- vBB/vBB+1
+    GET_VREG_WIDE_BY_ADDR r0, r1, r9    @ r0/r1<- vAA/vAA+1
+    .if $chkzero
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR $result0,$result1,r9  @ vAA/vAA+1<- $result0/$result1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 12-15 instructions */
+
+%def unop(preinstr="", instr=""):
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r0, r3                     @ r0<- vB
+    $preinstr                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 8-9 instructions */
+
+%def unopNarrower(preinstr="", instr=""):
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0/r1", where
+     * "result" is a 32-bit quantity in r0.
+     *
+     * For: long-to-float
+     *
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for op_move.)
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 9-10 instructions */
+
+%def unopWide(preinstr="", instr=""):
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-11 instructions */
+
+%def unopWider(preinstr="", instr=""):
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
+    GET_VREG r0, r3                     @ r0<- vB
+    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+    $preinstr                           @ optional op; may set condition codes
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vA/vA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 9-10 instructions */
+
+%def op_add_int():
+%  binop(instr="add     r0, r0, r1")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="add     r0, r0, r1")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="add     r0, r0, r1")
+
+%def op_add_int_lit8():
+%  binopLit8(extract="", instr="add     r0, r0, r3, asr #8")
+
+%def op_add_long():
+%  binopWide(preinstr="adds    r0, r0, r2", instr="adc     r1, r1, r3")
+
+%def op_add_long_2addr():
+%  binopWide2addr(preinstr="adds    r0, r0, r2", instr="adc     r1, r1, r3")
+
+%def op_and_int():
+%  binop(instr="and     r0, r0, r1")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="and     r0, r0, r1")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="and     r0, r0, r1")
+
+%def op_and_int_lit8():
+%  binopLit8(extract="", instr="and     r0, r0, r3, asr #8")
+
+%def op_and_long():
+%  binopWide(preinstr="and     r0, r0, r2", instr="and     r1, r1, r3")
+
+%def op_and_long_2addr():
+%  binopWide2addr(preinstr="and     r0, r0, r2", instr="and     r1, r1, r3")
+
+%def op_cmp_long():
+    /*
+     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+     * register based on the results of the comparison.
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r2    @ r0/r1<- vBB/vBB+1
+    GET_VREG_WIDE_BY_ADDR r2, r3, r3    @ r2/r3<- vCC/vCC+1
+    cmp     r0, r2
+    sbcs    ip, r1, r3                  @ Sets correct CCs for checking LT (but not EQ/NE)
+    mov     ip, #0
+    mvnlt   ip, #0                      @ -1
+    cmpeq   r0, r2                      @ For correct EQ/NE, we may need to repeat the first CMP
+    orrne   ip, #1
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    SET_VREG ip, r9                     @ vAA<- ip
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_div_int():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * div-int
+     *
+     */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG r1, r3                     @ r1<- vCC
+    GET_VREG r0, r2                     @ r0<- vBB
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r0, r0, r1                  @ r0<- op
+#else
+    bl    __aeabi_idiv                  @ r0<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 11-14 instructions */
+
+%def op_div_int_2addr():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * div-int/2addr
+     *
+     */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r1, r3                     @ r1<- vB
+    GET_VREG r0, r9                     @ r0<- vA
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r0, r0, r1                  @ r0<- op
+#else
+    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+
+%def op_div_int_lit16():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * div-int/lit16
+     *
+     */
+    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r0, r2                     @ r0<- vB
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r0, r0, r1                  @ r0<- op
+#else
+    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+%def op_div_int_lit8():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * div-int/lit8
+     *
+     */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG r0, r2                     @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ NOTE: cmp elided; the movs above already set the Z flag for CC == 0
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r0, r0, r1                  @ r0<- op
+#else
+    bl   __aeabi_idiv                   @ r0<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-12 instructions */
+
+%def op_div_long():
+%  binopWide(instr="bl      __aeabi_ldivmod", chkzero="1")
+
+%def op_div_long_2addr():
+%  binopWide2addr(instr="bl      __aeabi_ldivmod", chkzero="1")
+
+%def op_int_to_byte():
+%  unop(instr="sxtb    r0, r0")
+
+%def op_int_to_char():
+%  unop(instr="uxth    r0, r0")
+
+%def op_int_to_long():
+%  unopWider(instr="mov     r1, r0, asr #31")
+
+%def op_int_to_short():
+%  unop(instr="sxth    r0, r0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%  binop(instr="mul     r0, r1, r0")
+
+%def op_mul_int_2addr():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%  binop2addr(instr="mul     r0, r1, r0")
+
+%def op_mul_int_lit16():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%  binopLit16(instr="mul     r0, r1, r0")
+
+%def op_mul_int_lit8():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%  binopLit8(instr="mul     r0, r1, r0")
+
+%def op_mul_long():
+    /*
+     * Signed 64-bit integer multiply.
+     *
+     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+     *        WX
+     *      x YZ
+     *  --------
+     *     ZW ZX
+     *  YW YX
+     *
+     * The low word of the result holds ZX, the high word holds
+     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
+     * it doesn't fit in the low 64 bits.
+     *
+     * Unlike most ARM math operations, multiply instructions have
+     * restrictions on using the same register more than once (Rd and Rm
+     * cannot be the same).
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r2    @ r0/r1<- vBB/vBB+1
+    GET_VREG_WIDE_BY_ADDR r2, r3, r3    @ r2/r3<- vCC/vCC+1
+    mul     ip, r2, r1                  @ ip<- ZxW
+    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
+    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    add     r2, r2, lr                  @ r2<- (ZxW + YxX) + high(ZxX)
+    CLEAR_SHADOW_PAIR r0, lr, ip        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[AA]
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r1, r2 , r0   @ vAA/vAA+1<- r1/r2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_mul_long_2addr():
+    /*
+     * Signed 64-bit integer multiply, "/2addr" version.
+     *
+     * See op_mul_long for an explanation.
+     *
+     * We get a little tight on registers, so to avoid looking up &fp[A]
+     * again we stuff it into rINST.
+     */
+    /* mul-long/2addr vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+    VREG_INDEX_TO_ADDR rINST, r9        @ rINST<- &fp[A]
+    GET_VREG_WIDE_BY_ADDR r2, r3, r1    @ r2/r3<- vBB/vBB+1
+    GET_VREG_WIDE_BY_ADDR r0, r1, rINST @ r0/r1<- vAA/vAA+1
+    mul     ip, r2, r1                  @ ip<- ZxW
+    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
+    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
+    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    add     r2, r2, lr                  @ r2<- (ZxW + YxX) + high(ZxX)
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r1, r2, r0    @ vAA/vAA+1<- r1/r2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_neg_int():
+%  unop(instr="rsb     r0, r0, #0")
+
+%def op_neg_long():
+%  unopWide(preinstr="rsbs    r0, r0, #0", instr="rsc     r1, r1, #0")
+
+%def op_not_int():
+%  unop(instr="mvn     r0, r0")
+
+%def op_not_long():
+%  unopWide(preinstr="mvn     r0, r0", instr="mvn     r1, r1")
+
+%def op_or_int():
+%  binop(instr="orr     r0, r0, r1")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="orr     r0, r0, r1")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="orr     r0, r0, r1")
+
+%def op_or_int_lit8():
+%  binopLit8(extract="", instr="orr     r0, r0, r3, asr #8")
+
+%def op_or_long():
+%  binopWide(preinstr="orr     r0, r0, r2", instr="orr     r1, r1, r3")
+
+%def op_or_long_2addr():
+%  binopWide2addr(preinstr="orr     r0, r0, r2", instr="orr     r1, r1, r3")
+
+%def op_rem_int():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * NOTE: idivmod returns quotient in r0 and remainder in r1
+     *
+     * rem-int
+     *
+     */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG r1, r3                     @ r1<- vCC
+    GET_VREG r0, r2                     @ r0<- vBB
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r2, r0, r1
+    mls  r1, r1, r2, r0                 @ r1<- op, r0-r2 changed
+#else
+    bl   __aeabi_idivmod                @ r1<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r1, r9                     @ vAA<- r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 11-14 instructions */
+
+%def op_rem_int_2addr():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * NOTE: idivmod returns quotient in r0 and remainder in r1
+     *
+     * rem-int/2addr
+     *
+     */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r1, r3                     @ r1<- vB
+    GET_VREG r0, r9                     @ r0<- vA
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r2, r0, r1
+    mls     r1, r1, r2, r0              @ r1<- op
+#else
+    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r1, r9                     @ vA<- r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+
+%def op_rem_int_lit16():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * NOTE: idivmod returns quotient in r0 and remainder in r1
+     *
+     * rem-int/lit16
+     *
+     */
+    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r0, r2                     @ r0<- vB
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r2, r0, r1
+    mls     r1, r1, r2, r0              @ r1<- op
+#else
+    bl     __aeabi_idivmod              @ r1<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r1, r9                     @ vA<- r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-13 instructions */
+
+%def op_rem_int_lit8():
+    /*
+     * Specialized 32-bit binary operation
+     *
+     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+     * ARMv7 CPUs that have hardware division support).
+     *
+     * NOTE: idivmod returns quotient in r0 and remainder in r1
+     *
+     * rem-int/lit8
+     *
+     */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG r0, r2                     @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ NOTE: cmp elided; the movs above already set the Z flag for CC == 0
+    beq     common_errDivideByZero
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+    sdiv    r2, r0, r1
+    mls     r1, r1, r2, r0              @ r1<- op
+#else
+    bl       __aeabi_idivmod            @ r1<- op, r0-r3 changed
+#endif
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r1, r9                     @ vAA<- r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* 10-12 instructions */
+
+%def op_rem_long():
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%  binopWide(instr="bl      __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
+
+%def op_rem_long_2addr():
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%  binopWide2addr(instr="bl      __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%  binopLit16(instr="rsb     r0, r0, r1")
+
+%def op_rsub_int_lit8():
+%  binopLit8(extract="", instr="rsb     r0, r0, r3, asr #8")
+
+%def op_shl_int():
+%  binop(preinstr="and     r1, r1, #31", instr="mov     r0, r0, asl r1")
+
+%def op_shl_int_2addr():
+%  binop2addr(preinstr="and     r1, r1, #31", instr="mov     r0, r0, asl r1")
+
+%def op_shl_int_lit8():
+%  binopLit8(extract="ubfx    r1, r3, #8, #5", instr="mov     r0, r0, asl r1")
+
+%def op_shl_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
+    GET_VREG r2, r0                     @ r2<- vCC
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- vBB/vBB+1
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    mov     r1, r1, asl r2              @ r1<- r1 << r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mov     r0, r0, asl r2              @ r0<- r0 << r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_shl_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r2, r3                     @ r2<- vB
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    GET_VREG_WIDE_BY_ADDR r0, r1, r9    @ r0/r1<- vAA/vAA+1
+    mov     r1, r1, asl r2              @ r1<- r1 << r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
+    mov     r0, r0, asl r2              @ r0<- r0 << r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_shr_int():
+%  binop(preinstr="and     r1, r1, #31", instr="mov     r0, r0, asr r1")
+
+%def op_shr_int_2addr():
+%  binop2addr(preinstr="and     r1, r1, #31", instr="mov     r0, r0, asr r1")
+
+%def op_shr_int_lit8():
+%  binopLit8(extract="ubfx    r1, r3, #8, #5", instr="mov     r0, r0, asr r1")
+
+%def op_shr_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
+    GET_VREG r2, r0                     @ r2<- vCC
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- vBB/vBB+1
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mov     r1, r1, asr r2              @ r1<- r1 >> r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_shr_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r2, r3                     @ r2<- vB
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    GET_VREG_WIDE_BY_ADDR r0, r1, r9    @ r0/r1<- vAA/vAA+1
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
+    mov     r1, r1, asr r2              @ r1<- r1 >> r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_sub_int():
+%  binop(instr="sub     r0, r0, r1")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="sub     r0, r0, r1")
+
+%def op_sub_long():
+%  binopWide(preinstr="subs    r0, r0, r2", instr="sbc     r1, r1, r3")
+
+%def op_sub_long_2addr():
+%  binopWide2addr(preinstr="subs    r0, r0, r2", instr="sbc     r1, r1, r3")
+
+%def op_ushr_int():
+%  binop(preinstr="and     r1, r1, #31", instr="mov     r0, r0, lsr r1")
+
+%def op_ushr_int_2addr():
+%  binop2addr(preinstr="and     r1, r1, #31", instr="mov     r0, r0, lsr r1")
+
+%def op_ushr_int_lit8():
+%  binopLit8(extract="ubfx    r1, r3, #8, #5", instr="mov     r0, r0, lsr r1")
+
+%def op_ushr_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
+    GET_VREG r2, r0                     @ r2<- vCC
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- vBB/vBB+1
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_ushr_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    GET_VREG r2, r3                     @ r2<- vB
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    GET_VREG_WIDE_BY_ADDR r0, r1, r9    @ r0/r1<- vAA/vAA+1
+    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
+    rsb     r3, r2, #32                 @ r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @ ip<- r2 - 32
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
+    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_xor_int():
+%  binop(instr="eor     r0, r0, r1")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="eor     r0, r0, r1")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="eor     r0, r0, r1")
+
+%def op_xor_int_lit8():
+%  binopLit8(extract="", instr="eor     r0, r0, r3, asr #8")
+
+%def op_xor_long():
+%  binopWide(preinstr="eor     r0, r0, r2", instr="eor     r1, r1, r3")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(preinstr="eor     r0, r0, r2", instr="eor     r1, r1, r3")
diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
new file mode 100644
index 0000000..7b3db61
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/array.S
@@ -0,0 +1,250 @@
+%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B r2, 1, 0                    @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B r3, 1, 1                    @ r3<- CC
+    GET_VREG r0, r2                     @ r0<- vBB (array object)
+    GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $load   r2, [r0, #$data_offset]     @ r2<- vBB[vCC]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r2, r9                     @ vAA<- r2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_aget_boolean():
+%  op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B r2, 1, 0                    @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B r3, 1, 1                    @ r3<- CC
+    EXPORT_PC
+    GET_VREG r0, r2                     @ r0<- vBB (array object)
+    GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    bl       artAGetObjectFromMterp     @ (array, index)
+    ldr      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+    PREFETCH_INST 2
+    cmp      r1, #0
+    bne      MterpException
+    SET_VREG_OBJECT r0, r9
+    ADVANCE 2
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_aget_short():
+%  op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG r0, r2                     @ r0<- vBB (array object)
+    GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+    ldrd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r2, r3, r9    @ vAA/vAA+1<- r2/r3
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B r2, 1, 0                    @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B r3, 1, 1                    @ r3<- CC
+    GET_VREG r0, r2                     @ r0<- vBB (array object)
+    GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_VREG r2, r9                     @ r2<- vAA
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    $store  r2, [r0, #$data_offset]     @ vBB[vCC]<- r2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_aput_boolean():
+%  op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    add     r0, rFP, #OFF_FP_SHADOWFRAME
+    mov     r1, rPC
+    mov     r2, rINST
+    bl      MterpAputObject
+    cmp     r0, #0
+    beq     MterpPossibleException
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_aput_short():
+%  op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG r0, r2                     @ r0<- vBB (array object)
+    GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_VREG_WIDE_BY_ADDR r2, r3, r9    @ r2/r3<- vAA/vAA+1
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    strd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ vBB[vCC]<- r2/r3
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_array_length():
+    /*
+     * Return the length of an array.
+     */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    ubfx    r2, rINST, #8, #4           @ r2<- A
+    GET_VREG r0, r1                     @ r0<- vB (object ref)
+    cmp     r0, #0                      @ is object null?
+    beq     common_errNullObject        @ yup, fail
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- array length
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r3, r2                     @ vB<- length
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    FETCH r0, 1                         @ r0<- bbbb (lo)
+    FETCH r1, 2                         @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
+    GET_VREG r0, r3                     @ r0<- vAA (array object)
+    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
+    bl      MterpFillArrayData          @ (obj, payload)
+    cmp     r0, #0                      @ 0 means an exception is thrown
+    beq     MterpPossibleException      @ exception?
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern $helper
+    EXPORT_PC
+    add     r0, rFP, #OFF_FP_SHADOWFRAME
+    mov     r1, rPC
+    mov     r2, rSELF
+    bl      $helper
+    cmp     r0, #0
+    beq     MterpPossibleException
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC
+    add     r0, rFP, #OFF_FP_SHADOWFRAME
+    mov     r1, rPC
+    mov     r2, rINST
+    mov     r3, rSELF
+    bl      MterpNewArray
+    cmp     r0, #0
+    beq     MterpPossibleException
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/bincmp.S b/runtime/interpreter/mterp/arm/bincmp.S
deleted file mode 100644
index 8fad42f..0000000
--- a/runtime/interpreter/mterp/arm/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    b${condition} MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/binop.S b/runtime/interpreter/mterp/arm/binop.S
deleted file mode 100644
index eeb72ef..0000000
--- a/runtime/interpreter/mterp/arm/binop.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"preinstr":"", "result":"r0", "chkzero":"0"}
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if $chkzero
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ $result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG $result, r9                @ vAA<- $result
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/binop2addr.S b/runtime/interpreter/mterp/arm/binop2addr.S
deleted file mode 100644
index d09a43a..0000000
--- a/runtime/interpreter/mterp/arm/binop2addr.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {"preinstr":"", "result":"r0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if $chkzero
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ $result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG $result, r9                @ vAA<- $result
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit16.S b/runtime/interpreter/mterp/arm/binopLit16.S
deleted file mode 100644
index 065394e..0000000
--- a/runtime/interpreter/mterp/arm/binopLit16.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"r0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if $chkzero
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    $instr                              @ $result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG $result, r9                @ vAA<- $result
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
deleted file mode 100644
index 7c9c631..0000000
--- a/runtime/interpreter/mterp/arm/binopLit8.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"extract":"asr     r1, r3, #8", "result":"r0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    $extract                            @ optional; typically r1<- ssssssCC (sign extended)
-    .if $chkzero
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    $instr                              @ $result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG $result, r9                @ vAA<- $result
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
deleted file mode 100644
index 4d88001..0000000
--- a/runtime/interpreter/mterp/arm/binopWide.S
+++ /dev/null
@@ -1,38 +0,0 @@
-%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if $chkzero
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {$result0,$result1}     @ vAA/vAA+1<- $result0/$result1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
deleted file mode 100644
index bb16335..0000000
--- a/runtime/interpreter/mterp/arm/binopWide2addr.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if $chkzero
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {$result0,$result1}     @ vAA/vAA+1<- $result0/$result1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/arm/const.S b/runtime/interpreter/mterp/arm/const.S
deleted file mode 100644
index f6f8157..0000000
--- a/runtime/interpreter/mterp/arm/const.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    FETCH   r0, 1                       @ r0<- BBBB
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      $helper                     @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     @ load rINST
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
new file mode 100644
index 0000000..2299ef9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/control_flow.S
@@ -0,0 +1,209 @@
+%def bincmp(condition=""):
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    ubfx    r0, rINST, #8, #4           @ r0<- A
+    GET_VREG r3, r1                     @ r3<- vB
+    GET_VREG r0, r0                     @ r0<- vA
+    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+    cmp     r0, r3                      @ compare (vA, vB)
+    b${condition} MterpCommonTakenBranchNoFlags
+    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
+    beq     .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def zcmp(condition=""):
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG r0, r0                     @ r0<- vAA
+    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+    cmp     r0, #0                      @ compare (vAA, 0)
+    b${condition} MterpCommonTakenBranchNoFlags
+    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
+    beq     .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_goto():
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S rINST, 1                    @ rINST<- ssssAAAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".  Because
+     * we need the V bit set, we'll use an adds to convert from Dalvik
+     * offset to byte offset.
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH r0, 1                         @ r0<- aaaa (lo)
+    FETCH r3, 2                         @ r3<- AAAA (hi)
+    orrs    rINST, r0, r3, lsl #16      @ rINST<- AAAAaaaa
+    b       MterpCommonTakenBranch
+
+%def op_if_eq():
+%  bincmp(condition="eq")
+
+%def op_if_eqz():
+%  zcmp(condition="eq")
+
+%def op_if_ge():
+%  bincmp(condition="ge")
+
+%def op_if_gez():
+%  zcmp(condition="ge")
+
+%def op_if_gt():
+%  bincmp(condition="gt")
+
+%def op_if_gtz():
+%  zcmp(condition="gt")
+
+%def op_if_le():
+%  bincmp(condition="le")
+
+%def op_if_lez():
+%  zcmp(condition="le")
+
+%def op_if_lt():
+%  bincmp(condition="lt")
+
+%def op_if_ltz():
+%  zcmp(condition="lt")
+
+%def op_if_ne():
+%  bincmp(condition="ne")
+
+%def op_if_nez():
+%  zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH r0, 1                         @ r0<- bbbb (lo)
+    FETCH r1, 2                         @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_VREG r1, r3                     @ r1<- vAA
+    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
+    bl      $func                       @ r0<- code-unit branch offset
+    movs    rINST, r0
+    b       MterpCommonTakenBranch
+
+%def op_return():
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    mov     r0, rSELF
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    blne    MterpSuspendCheck                       @ (self)
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG r0, r2                     @ r0<- vAA
+    mov     r1, #0
+    b       MterpReturn
+
+%def op_return_object():
+%  op_return()
+
+%def op_return_void():
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    mov     r0, rSELF
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    blne    MterpSuspendCheck                       @ (self)
+    mov    r0, #0
+    mov    r1, #0
+    b      MterpReturn
+
+%def op_return_void_no_barrier():
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    mov     r0, rSELF
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    blne    MterpSuspendCheck                       @ (self)
+    mov    r0, #0
+    mov    r1, #0
+    b      MterpReturn
+
+%def op_return_wide():
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    mov     r0, rSELF
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    blne    MterpSuspendCheck                       @ (self)
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r2    @ r0/r1 <- vAA/vAA+1
+    b       MterpReturn
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC
+    mov      r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG r1, r2                      @ r1<- vAA (exception object)
+    cmp      r1, #0                      @ null object?
+    beq      common_errNullObject        @ yes, throw an NPE instead
+    str      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ thread->exception<- obj
+    b        MterpException
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
deleted file mode 100644
index 7c7c527..0000000
--- a/runtime/interpreter/mterp/arm/entry.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align  2
-
-/*
- * On entry:
- *  r0  Thread* self/
- *  r1  insns_
- *  r2  ShadowFrame
- *  r3  JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
-    stmfd   sp!, {r3-r10,fp,lr}         @ save 10 regs, (r3 just to align 64)
-    .cfi_adjust_cfa_offset 40
-    .cfi_rel_offset r3, 0
-    .cfi_rel_offset r4, 4
-    .cfi_rel_offset r5, 8
-    .cfi_rel_offset r6, 12
-    .cfi_rel_offset r7, 16
-    .cfi_rel_offset r8, 20
-    .cfi_rel_offset r9, 24
-    .cfi_rel_offset r10, 28
-    .cfi_rel_offset fp, 32
-    .cfi_rel_offset lr, 36
-
-    /* Remember the return register */
-    str     r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
-    /* Remember the dex instruction pointer */
-    str     r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
-    /* set up "named" registers */
-    mov     rSELF, r0
-    ldr     r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
-    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET     @ point to vregs.
-    VREG_INDEX_TO_ADDR rREFS, r0                   @ point to reference array in shadow frame
-    ldr     r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET]   @ Get starting dex_pc.
-    add     rPC, r1, r0, lsl #1                    @ Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
-    /* Set up for backwards branches & osr profiling */
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rSELF
-    bl      MterpSetUpHotnessCountdown
-    mov     rPROFILE, r0                @ Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST                          @ load rINST from rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm/fallback.S b/runtime/interpreter/mterp/arm/fallback.S
deleted file mode 100644
index 44e7e12..0000000
--- a/runtime/interpreter/mterp/arm/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    b    MterpFallback
-
diff --git a/runtime/interpreter/mterp/arm/fbinop.S b/runtime/interpreter/mterp/arm/fbinop.S
deleted file mode 100644
index 594ee03..0000000
--- a/runtime/interpreter/mterp/arm/fbinop.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Generic 32-bit floating-point operation.  Provide an "instr" line that
-     * specifies an instruction that performs "s2 = s0 op s1".  Because we
-     * use the "softfp" ABI, this must be an instruction, not a function call.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    flds    s0, [r2]                    @ s0<- vBB
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $instr                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S
deleted file mode 100644
index 53c87a0..0000000
--- a/runtime/interpreter/mterp/arm/fbinop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    flds    s1, [r3]                    @ s1<- vB
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    flds    s0, [r9]                    @ s0<- vA
-    $instr                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
deleted file mode 100644
index ca13bfb..0000000
--- a/runtime/interpreter/mterp/arm/fbinopWide.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Generic 64-bit double-precision floating point binary operation.
-     * Provide an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * for: add-double, sub-double, mul-double, div-double
-     */
-    /* doubleop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $instr                              @ s2<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
deleted file mode 100644
index 9766e2c..0000000
--- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Generic 64-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
-    fldd    d1, [r3]                    @ d1<- vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fldd    d0, [r9]                    @ d0<- vA
-    $instr                              @ d2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/field.S b/runtime/interpreter/mterp/arm/field.S
deleted file mode 100644
index c468788..0000000
--- a/runtime/interpreter/mterp/arm/field.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default { }
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern $helper
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       $helper
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
new file mode 100644
index 0000000..035fc13
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/floating_point.S
@@ -0,0 +1,482 @@
+%def fbinop(instr=""):
+    /*
+     * Generic 32-bit floating-point operation.  Provide an "instr" line that
+     * specifies an instruction that performs "s2 = s0 op s1".  Because we
+     * use the "softfp" ABI, this must be an instruction, not a function call.
+     *
+     * For: add-float, sub-float, mul-float, div-float
+     */
+    /* floatop vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
+    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB
+
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $instr                              @ s2<- op
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_FLOAT s2, r9, lr           @ vAA<- s2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def fbinop2addr(instr=""):
+    /*
+     * Generic 32-bit floating point "/2addr" binary operation.  Provide
+     * an "instr" line that specifies an instruction that performs
+     * "s2 = s0 op s1".
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vB
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    GET_VREG_FLOAT_BY_ADDR s0, r9       @ s0<- vA
+    $instr                              @ s2<- op
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_FLOAT_BY_ADDR s2, r9       @ vAA<- s2 No need to clear as it's 2addr
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def fbinopWide(instr=""):
+    /*
+     * Generic 64-bit double-precision floating point binary operation.
+     * Provide an "instr" line that specifies an instruction that performs
+     * "d2 = d0 op d1".
+     *
+     * for: add-double, sub-double, mul-double, div-double
+     */
+    /* doubleop vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
+    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $instr                              @ d2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
+    SET_VREG_DOUBLE_BY_ADDR d2, r9      @ vAA<- d2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def fbinopWide2addr(instr=""):
+    /*
+     * Generic 64-bit floating point "/2addr" binary operation.  Provide
+     * an "instr" line that specifies an instruction that performs
+     * "d2 = d0 op d1".
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *      div-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
+    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vB
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    GET_VREG_DOUBLE_BY_ADDR d0, r9      @ d0<- vA
+    $instr                              @ d2<- op
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_DOUBLE_BY_ADDR d2, r9      @ vAA<- d2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def funop(instr=""):
+    /*
+     * Generic 32-bit unary floating-point operation.  Provide an "instr"
+     * line that specifies an instruction that performs "s1 = op s0".
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+    GET_VREG_FLOAT_BY_ADDR s0, r3       @ s0<- vB
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $instr                              @ s1<- op
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_FLOAT s1, r9, lr           @ vA<- s1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def funopNarrower(instr=""):
+    /*
+     * Generic 64bit-to-32bit unary floating point operation.  Provide an
+     * "instr" line that specifies an instruction that performs "s0 = op d0".
+     *
+     * For: double-to-int, double-to-float
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+    GET_VREG_DOUBLE_BY_ADDR d0, r3      @ d0<- vB
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $instr                              @ s0<- op
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_FLOAT s0, r9, lr           @ vA<- s0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def funopWider(instr=""):
+    /*
+     * Generic 32bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "d0 = op s0".
+     *
+     * For: int-to-double, float-to-double
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+    GET_VREG_FLOAT_BY_ADDR s0, r3       @ s0<- vB
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    $instr                              @ d0<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+    SET_VREG_DOUBLE_BY_ADDR d0, r9      @ vA<- d0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_add_double():
+%  fbinopWide(instr="faddd   d2, d0, d1")
+
+%def op_add_double_2addr():
+%  fbinopWide2addr(instr="faddd   d2, d0, d1")
+
+%def op_add_float():
+%  fbinop(instr="fadds   s2, s0, s1")
+
+%def op_add_float_2addr():
+%  fbinop2addr(instr="fadds   s2, s0, s1")
+
+%def op_cmpg_double():
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * int compare(x, y) {
+     *     if (x == y) {
+     *         return 0;
+     *     } else if (x < y) {
+     *         return -1;
+     *     } else if (x > y) {
+     *         return 1;
+     *     } else {               // unordered (NaN): cmpg returns 1
+     *         return 1;
+     *     }
+     * }
+     */
+    /* op vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
+    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
+    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mov     r0, #1                      @ r0<- 1 (default)
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    fmstat                              @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_cmpg_float():
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * int compare(x, y) {
+     *     if (x == y) {
+     *         return 0;
+     *     } else if (x < y) {
+     *         return -1;
+     *     } else if (x > y) {
+     *         return 1;
+     *     } else {               // unordered (NaN): cmpg returns 1
+     *         return 1;
+     *     }
+     * }
+     */
+    /* op vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB
+    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
+    vcmpe.f32 s0, s1                    @ compare (vBB, vCC)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mov     r0, #1                      @ r0<- 1 (default)
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    fmstat                              @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_cmpl_double():
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * int compare(x, y) {
+     *     if (x == y) {
+     *         return 0;
+     *     } else if (x > y) {
+     *         return 1;
+     *     } else if (x < y) {
+     *         return -1;
+     *     } else {               // unordered (NaN): cmpl returns -1
+     *         return -1;
+     *     }
+     * }
+     */
+    /* op vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
+    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
+    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mvn     r0, #0                      @ r0<- -1 (default)
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    fmstat                              @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_cmpl_float():
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * int compare(x, y) {
+     *     if (x == y) {
+     *         return 0;
+     *     } else if (x > y) {
+     *         return 1;
+     *     } else if (x < y) {
+     *         return -1;
+     *     } else {               // unordered (NaN): cmpl returns -1
+     *         return -1;
+     *     }
+     * }
+     */
+    /* op vAA, vBB, vCC */
+    FETCH r0, 1                         @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
+    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB
+    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
+    vcmpe.f32  s0, s1                   @ compare (vBB, vCC)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    mvn     r0, #0                      @ r0<- -1 (default)
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    fmstat                              @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+    SET_VREG r0, r9                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_div_double():
+%  fbinopWide(instr="fdivd   d2, d0, d1")
+
+%def op_div_double_2addr():
+%  fbinopWide2addr(instr="fdivd   d2, d0, d1")
+
+%def op_div_float():
+%  fbinop(instr="fdivs   s2, s0, s1")
+
+%def op_div_float_2addr():
+%  fbinop2addr(instr="fdivs   s2, s0, s1")
+
+%def op_double_to_float():
+%  funopNarrower(instr="vcvt.f32.f64  s0, d0")
+
+%def op_double_to_int():
+%  funopNarrower(instr="ftosizd  s0, d0")
+
+%def op_double_to_long():
+%  unopWide(instr="bl      d2l_doconv")
+%  add_helper(op_double_to_long_helper)
+
+%def op_double_to_long_helper():
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+    ubfx    r2, r1, #20, #11            @ grab the exponent
+    movw    r3, #0x43e
+    cmp     r2, r3                      @ out of long range (exponent >= 63)?
+    bhs     d2l_special_cases
+    b       __aeabi_d2lz                @ tail call to convert double to long
+d2l_special_cases:
+    movw    r3, #0x7ff
+    cmp     r2, r3
+    beq     d2l_maybeNaN                @ NaN?
+d2l_notNaN:
+    adds    r1, r1, r1                  @ sign bit to carry
+    mov     r0, #0xffffffff             @ assume maxlong for lsw
+    mov     r1, #0x7fffffff             @ assume maxlong for msw
+    adc     r0, r0, #0
+    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
+    bx      lr                          @ return
+d2l_maybeNaN:
+    orrs    r3, r0, r1, lsl #12
+    beq     d2l_notNaN                  @ zero fraction means infinity, not NaN
+    mov     r0, #0
+    mov     r1, #0
+    bx      lr                          @ return 0 for NaN
+
+%def op_float_to_double():
+%  funopWider(instr="vcvt.f64.f32  d0, s0")
+
+%def op_float_to_int():
+%  funop(instr="ftosizs s1, s0")
+
+%def op_float_to_long():
+%  unopWider(instr="bl      f2l_doconv")
+%  add_helper(op_float_to_long_helper)
+
+%def op_float_to_long_helper():
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+    ubfx    r2, r0, #23, #8             @ grab the exponent
+    cmp     r2, #0xbe                   @ out of long range (exponent >= 63)?
+    bhs     f2l_special_cases
+    b       __aeabi_f2lz                @ tail call to convert float to long
+f2l_special_cases:
+    cmp     r2, #0xff                   @ NaN or infinity?
+    beq     f2l_maybeNaN
+f2l_notNaN:
+    adds    r0, r0, r0                  @ sign bit to carry
+    mov     r0, #0xffffffff             @ assume maxlong for lsw
+    mov     r1, #0x7fffffff             @ assume maxlong for msw
+    adc     r0, r0, #0
+    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
+    bx      lr                          @ return
+f2l_maybeNaN:
+    lsls    r3, r0, #9
+    beq     f2l_notNaN                  @ zero fraction means infinity, not NaN
+    mov     r0, #0
+    mov     r1, #0
+    bx      lr                          @ return 0 for NaN
+
+%def op_int_to_double():
+%  funopWider(instr="fsitod  d0, s0")
+
+%def op_int_to_float():
+%  funop(instr="fsitos  s1, s0")
+
+%def op_long_to_double():
+    /*
+     * Specialised 64-bit floating point operation.
+     *
+     * Note: The result will be returned in d2.
+     *
+     * For: long-to-double
+     */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    r9, rINST, #8, #4           @ r9<- A
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+    GET_VREG_DOUBLE_BY_ADDR d0, r3      @ d0<- vB (long halves in s0/s1)
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+
+    vcvt.f64.s32    d1, s1              @ d1<- (double)(vBh)
+    vcvt.f64.u32    d2, s0              @ d2<- (double)(vBl)
+    vldr            d3, constval$opcode
+    vmla.f64        d2, d1, d3          @ d2<- vBh*2^32 + vBl
+
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_DOUBLE_BY_ADDR d2, r9      @ vA<- d2
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+    /* literal pool helper */
+constval${opcode}:
+    .8byte          0x41f0000000000000
+
+%def op_long_to_float():
+%  unopNarrower(instr="bl      __aeabi_l2f")
+
+%def op_mul_double():
+%  fbinopWide(instr="fmuld   d2, d0, d1")
+
+%def op_mul_double_2addr():
+%  fbinopWide2addr(instr="fmuld   d2, d0, d1")
+
+%def op_mul_float():
+%  fbinop(instr="fmuls   s2, s0, s1")
+
+%def op_mul_float_2addr():
+%  fbinop2addr(instr="fmuls   s2, s0, s1")
+
+%def op_neg_double():
+%  unopWide(instr="add     r1, r1, #0x80000000")
+
+%def op_neg_float():
+%  unop(instr="add     r0, r0, #0x80000000")
+
+%def op_rem_double():
+/* EABI doesn't define a double remainder function, but libm does */
+%  binopWide(instr="bl      fmod")
+
+%def op_rem_double_2addr():
+/* EABI doesn't define a double remainder function, but libm does */
+%  binopWide2addr(instr="bl      fmod")
+
+%def op_rem_float():
+/* EABI doesn't define a float remainder function, but libm does */
+%  binop(instr="bl      fmodf")
+
+%def op_rem_float_2addr():
+/* EABI doesn't define a float remainder function, but libm does */
+%  binop2addr(instr="bl      fmodf")
+
+%def op_sub_double():
+%  fbinopWide(instr="fsubd   d2, d0, d1")
+
+%def op_sub_double_2addr():
+%  fbinopWide2addr(instr="fsubd   d2, d0, d1")
+
+%def op_sub_float():
+%  fbinop(instr="fsubs   s2, s0, s1")
+
+%def op_sub_float_2addr():
+%  fbinop2addr(instr="fsubs   s2, s0, s1")
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
deleted file mode 100644
index 8e9c3c2..0000000
--- a/runtime/interpreter/mterp/arm/footer.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogDivideByZeroException
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogArrayIndexException
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNegativeArraySizeException
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNoSuchMethodException
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNullObjectException
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogExceptionThrownException
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    ldr  r2, [rSELF, #THREAD_FLAGS_OFFSET]
-    bl MterpLogSuspendFallback
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ldr     r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    cmp     r0, #0                                  @ Exception pending?
-    beq     MterpFallback                           @ If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    bl      MterpHandleException                    @ (self, shadow_frame)
-    cmp     r0, #0
-    beq     MterpExceptionReturn                    @ no local catch, back to caller.
-    ldr     r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
-    ldr     r1, [rFP, #OFF_FP_DEX_PC]
-    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-    add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    cmp     rINST, #0
-MterpCommonTakenBranch:
-    bgt     .L_forward_branch           @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmp     rPROFILE, #JIT_CHECK_OSR
-    beq     .L_osr_check
-    subsgt  rPROFILE, #1
-    beq     .L_add_batch                @ counted down to zero - report
-.L_resume_backward_branch:
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    REFRESH_IBASE
-    add     r2, rINST, rINST            @ r2<- byte offset
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bne     .L_suspend_request_pending
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    mov     r0, rSELF
-    bl      MterpSuspendCheck           @ (self)
-    cmp     r0, #0
-    bne     MterpFallback
-    REFRESH_IBASE                       @ might have changed during suspend
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_no_count_backwards:
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    bne     .L_resume_backward_branch
-.L_osr_check:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    cmp     rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
-    beq     .L_check_osr_forward
-.L_resume_forward_branch:
-    add     r2, rINST, rINST            @ r2<- byte offset
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_check_osr_forward:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    mov     r2, rSELF
-    bl      MterpAddHotnessBatch        @ (method, shadow_frame, self)
-    mov     rPROFILE, r0                @ restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, #2
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    mov r0, rSELF
-    add r1, rFP, #OFF_FP_SHADOWFRAME
-    mov r2, rINST
-    bl MterpLogOSR
-#endif
-    mov r0, #1                          @ Signal normal return
-    b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogFallback
-#endif
-MterpCommonFallback:
-    mov     r0, #0                                  @ signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    mov     r0, #1                                  @ signal return to caller.
-    b MterpDone
-MterpReturn:
-    ldr     r2, [rFP, #OFF_FP_RESULT_REGISTER]
-    str     r0, [r2]
-    str     r1, [r2, #4]
-    mov     r0, #1                                  @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmp     rPROFILE, #0
-    bgt     MterpProfileActive                      @ if > 0, we may have some counts to report.
-    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
-
-MterpProfileActive:
-    mov     rINST, r0                               @ stash return value
-    /* Report cached hotness counts */
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rSELF
-    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    bl      MterpAddHotnessBatch                    @ (method, shadow_frame, self)
-    mov     r0, rINST                               @ restore return value
-    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
-
-    END ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S
deleted file mode 100644
index 1b8bb8b..0000000
--- a/runtime/interpreter/mterp/arm/funop.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Generic 32-bit unary floating-point operation.  Provide an "instr"
-     * line that specifies an instruction that performs "s1 = op s0".
-     *
-     * for: int-to-float, float-to-int
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $instr                              @ s1<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s1, [r9]                    @ vA<- s1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S
deleted file mode 100644
index b9f758b..0000000
--- a/runtime/interpreter/mterp/arm/funopNarrower.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Generic 64bit-to-32bit unary floating point operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op d0".
-     *
-     * For: double-to-int, double-to-float
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    fldd    d0, [r3]                    @ d0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $instr                              @ s0<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s0, [r9]                    @ vA<- s0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
deleted file mode 100644
index 854cdc9..0000000
--- a/runtime/interpreter/mterp/arm/funopWider.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op s0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $instr                              @ d0<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fstd    d0, [r9]                    @ vA<- d0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
deleted file mode 100644
index 8d9cab5..0000000
--- a/runtime/interpreter/mterp/arm/header.S
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending".  Only the arguments that don't fit in the first 4
-registers are placed on the stack.  "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  r4  rPC       interpreted program counter, used for fetching instructions
-  r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rSELF     self (Thread) pointer
-  r7  rINST     first 16-bit code unit of current instruction
-  r8  rIBASE    interpreted instruction base pointer, used for computed goto
-  r10 rPROFILE  branch profiling countdown
-  r11 rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      r4
-#define CFI_DEX  4  // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
-#define rFP      r5
-#define rSELF    r6
-#define rINST    r7
-#define rIBASE   r8
-#define rPROFILE r10
-#define rREFS    r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
-    ldr  \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
-    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-    sub  \tmp, rPC, \tmp
-    asr  \tmp, #1
-    str  \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    ldrh    rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ldrh    rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
-    ldrh    \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    ldrh    rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-  add  rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg.  Updates
- * rPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    ldrh    rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
-    ldrh    \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
-    ldrsh   \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
-    ldrb     \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
-    and     \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg.  Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
-    add     pc, rIBASE, \reg, lsl #${handler_size_bits}
-.endm
-.macro GOTO_OPCODE_BASE base,reg
-    add     pc, \base, \reg, lsl #${handler_size_bits}
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
-    ldr     \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
-    str     \reg, [rFP, \vreg, lsl #2]
-    mov     \reg, #0
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
-    str     \reg, [rFP, \vreg, lsl #2]
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
-    mov     \tmp1, #0
-    add     \tmp2, \vreg, #1
-    SET_VREG_SHADOW \tmp1, \vreg
-    SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
-    add     \reg, rFP, \vreg, lsl #2   /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-  ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
-    .arm
-    .type \name, #function
-    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-    .fnstart
-.endm
-
-.macro END name
-    .fnend
-    .cfi_endproc
-    .size \name, .-\name
-.endm
diff --git a/runtime/interpreter/mterp/arm/instruction_end.S b/runtime/interpreter/mterp/arm/instruction_end.S
deleted file mode 100644
index f90ebd0..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmInstructionEnd, #object
-    .hidden artMterpAsmInstructionEnd
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_alt.S b/runtime/interpreter/mterp/arm/instruction_end_alt.S
deleted file mode 100644
index 0b66dbb..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmAltInstructionEnd, #object
-    .hidden artMterpAsmAltInstructionEnd
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_sister.S b/runtime/interpreter/mterp/arm/instruction_end_sister.S
deleted file mode 100644
index 71c0300..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmSisterEnd, #object
-    .hidden artMterpAsmSisterEnd
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_start.S b/runtime/interpreter/mterp/arm/instruction_start.S
deleted file mode 100644
index b7e9cf5..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    .type artMterpAsmInstructionStart, #object
-    .hidden artMterpAsmInstructionStart
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_alt.S b/runtime/interpreter/mterp/arm/instruction_start_alt.S
deleted file mode 100644
index 7a67ba0..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    .type artMterpAsmAltInstructionStart, #object
-    .hidden artMterpAsmAltInstructionStart
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_sister.S b/runtime/interpreter/mterp/arm/instruction_start_sister.S
deleted file mode 100644
index 0036061..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
-    .type artMterpAsmSisterStart, #object
-    .hidden artMterpAsmSisterStart
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
index e47dd1b..08fd1bb 100644
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
     /*
      * Generic invoke handler wrapper.
      */
@@ -14,9 +14,108 @@
     cmp     r0, #0
     beq     MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    EXPORT_PC
+    mov     r0, rSELF
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, rPC
+    mov     r3, rINST
+    bl      $helper
+    cmp     r0, #0
+    beq     MterpException
+    FETCH_ADVANCE_INST 4
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+    cmp     r0, #0
+    beq     MterpFallback
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+    /*
+     * Handle an invoke-custom invocation.
+     *
+     * for: invoke-custom, invoke-custom/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/arm/invoke_polymorphic.S b/runtime/interpreter/mterp/arm/invoke_polymorphic.S
deleted file mode 100644
index f569d61..0000000
--- a/runtime/interpreter/mterp/arm/invoke_polymorphic.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      $helper
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
new file mode 100644
index 0000000..4cf65d1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -0,0 +1,775 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame.
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending".  Only the arguments that don't fit in the first 4
+registers are placed on the stack.  "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  r4  rPC       interpreted program counter, used for fetching instructions
+  r5  rFP       interpreted frame pointer, used for accessing locals and args
+  r6  rSELF     self (Thread) pointer
+  r7  rINST     first 16-bit code unit of current instruction
+  r8  rIBASE    interpreted instruction base pointer, used for computed goto
+  r10 rPROFILE  branch profiling countdown
+  r11 rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC      r4
+#define CFI_DEX  4  // DWARF register number of the register holding dex-pc (xPC).
+#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
+#define rFP      r5
+#define rSELF    r6
+#define rINST    r7
+#define rIBASE   r8
+#define rPROFILE r10
+#define rREFS    r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+.macro EXPORT_DEX_PC tmp
+    ldr  \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
+    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+    sub  \tmp, rPC, \tmp
+    asr  \tmp, #1
+    str  \tmp, [rFP, #OFF_FP_DEX_PC]
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    ldrh    rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+    ldrh    rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+    ldrh    \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+    ldrh    rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+  add  rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg.  Updates
+ * rPC to point to the next instruction.  "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+    ldrh    rINST, [rPC, \reg]!
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+    ldrh    \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+    ldrsh   \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+    ldrb     \reg, [rPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+    and     \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+    and     \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.  Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+    add     pc, rIBASE, \reg, lsl #${handler_size_bits}
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+    add     pc, \base, \reg, lsl #${handler_size_bits}
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+    ldr     \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+    str     \reg, [rFP, \vreg, lsl #2]
+    mov     \reg, #0
+    str     \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_WIDE regLo, regHi, vreg
+    add     ip, rFP, \vreg, lsl #2
+    strd    \regLo, \regHi, [ip]
+    mov     \regLo, #0
+    mov     \regHi, #0
+    add     ip, rREFS, \vreg, lsl #2
+    strd    \regLo, \regHi, [ip]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+    str     \reg, [rFP, \vreg, lsl #2]
+    str     \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_SHADOW reg, vreg
+    str     \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_FLOAT reg, vreg, tmpreg
+    add     \tmpreg, rFP, \vreg, lsl #2
+    fsts    \reg, [\tmpreg]
+    mov     \tmpreg, #0
+    str     \tmpreg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+    mov     \tmp1, #0
+    add     \tmp2, \vreg, #1
+    SET_VREG_SHADOW \tmp1, \vreg
+    SET_VREG_SHADOW \tmp1, \tmp2
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+    add     \reg, rFP, \vreg, lsl #2   /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+.macro GET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+    ldmia \addr, {\reg0, \reg1}
+.endm
+.macro SET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+    stmia \addr, {\reg0, \reg1}
+.endm
+.macro GET_VREG_FLOAT_BY_ADDR reg, addr
+    flds \reg, [\addr]
+.endm
+.macro SET_VREG_FLOAT_BY_ADDR reg, addr
+    fsts \reg, [\addr]
+.endm
+.macro GET_VREG_DOUBLE_BY_ADDR reg, addr
+    fldd \reg, [\addr]
+.endm
+.macro SET_VREG_DOUBLE_BY_ADDR reg, addr
+    fstd \reg, [\addr]
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+  ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+    .arm
+    .type \name, #function
+    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
+    .global \name
+    /* Cache alignment for function entry */
+    .balign 16
+\name:
+.endm
+
+.macro END name
+    .size \name, .-\name
+.endm
+
+// Macro to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+    rsb \rRef, \rRef, #0
+#endif  // USE_HEAP_POISONING
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .align  2
+
+/*
+ * On entry:
+ *  r0  Thread* self/
+ *  r1  insns_
+ *  r2  ShadowFrame
+ *  r3  JValue* result_register
+ *
+ */
+
+ENTRY ExecuteMterpImpl
+    .cfi_startproc
+    stmfd   sp!, {r3-r10,fp,lr}         @ save 10 regs, (r3 just to align 64)
+    .cfi_adjust_cfa_offset 40
+    .cfi_rel_offset r3, 0
+    .cfi_rel_offset r4, 4
+    .cfi_rel_offset r5, 8
+    .cfi_rel_offset r6, 12
+    .cfi_rel_offset r7, 16
+    .cfi_rel_offset r8, 20
+    .cfi_rel_offset r9, 24
+    .cfi_rel_offset r10, 28
+    .cfi_rel_offset fp, 32
+    .cfi_rel_offset lr, 36
+
+    /* Remember the return register */
+    str     r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+    /* Remember the dex instruction pointer */
+    str     r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
+
+    /* set up "named" registers */
+    mov     rSELF, r0
+    ldr     r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET     @ point to vregs.
+    VREG_INDEX_TO_ADDR rREFS, r0                   @ point to reference array in shadow frame
+    ldr     r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET]   @ Get starting dex_pc.
+    add     rPC, r1, r0, lsl #1                    @ Create direct pointer to 1st dex opcode
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+    EXPORT_PC
+
+    /* Starting ibase */
+    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+    /* Set up for backwards branches & osr profiling */
+    ldr     r0, [rFP, #OFF_FP_METHOD]
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, rSELF
+    bl      MterpSetUpHotnessCountdown
+    mov     rPROFILE, r0                @ Starting hotness countdown to rPROFILE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST                          @ load rINST from rPC
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+    /* NOTE: no fallthrough */
+    // cfi info continues, and covers the whole mterp implementation.
+    END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+    // Call C++ to do debug checks and return to the handler using tail call.
+    .extern MterpCheckBefore
+    mov    r0, rSELF
+    add    r1, rFP, #OFF_FP_SHADOWFRAME
+    mov    r2, rPC
+    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
+
+%def opcode_pre():
+%  add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+    #if !defined(NDEBUG)
+    bl     Mterp_dchecks_before_helper
+    #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+
+%def helpers():
+    ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogDivideByZeroException
+#endif
+    b MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogArrayIndexException
+#endif
+    b MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNegativeArraySizeException
+#endif
+    b MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNoSuchMethodException
+#endif
+    b MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNullObjectException
+#endif
+    b MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogExceptionThrownException
+#endif
+    b MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    ldr  r2, [rSELF, #THREAD_FLAGS_OFFSET]
+    bl MterpLogSuspendFallback
+#endif
+    b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    ldr     r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+    cmp     r0, #0                                  @ Exception pending?
+    beq     MterpFallback                           @ If not, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    mov     r0, rSELF
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    bl      MterpHandleException                    @ (self, shadow_frame)
+    cmp     r0, #0
+    beq     MterpExceptionReturn                    @ no local catch, back to caller.
+    ldr     r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
+    ldr     r1, [rFP, #OFF_FP_DEX_PC]
+    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+    add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+    cmp     r0, #0
+    beq     MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC
+    FETCH_INST
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip
+    /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    rINST          <= signed offset
+ *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
+ *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+    cmp     rINST, #0
+MterpCommonTakenBranch:
+    bgt     .L_forward_branch           @ don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+#  error "JIT_CHECK_OSR must be -1."
+#endif
+    cmp     rPROFILE, #JIT_CHECK_OSR
+    beq     .L_osr_check
+    subsgt  rPROFILE, #1
+    beq     .L_add_batch                @ counted down to zero - report
+.L_resume_backward_branch:
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    REFRESH_IBASE
+    add     r2, rINST, rINST            @ r2<- byte offset
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    bne     .L_suspend_request_pending
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+.L_suspend_request_pending:
+    EXPORT_PC
+    mov     r0, rSELF
+    bl      MterpSuspendCheck           @ (self)
+    cmp     r0, #0
+    bne     MterpFallback
+    REFRESH_IBASE                       @ might have changed during suspend
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+.L_no_count_backwards:
+    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
+    bne     .L_resume_backward_branch
+.L_osr_check:
+    mov     r0, rSELF
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, rINST
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
+    cmp     r0, #0
+    bne     MterpOnStackReplacement
+    b       .L_resume_backward_branch
+
+.L_forward_branch:
+    cmp     rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
+    beq     .L_check_osr_forward
+.L_resume_forward_branch:
+    add     r2, rINST, rINST            @ r2<- byte offset
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+.L_check_osr_forward:
+    mov     r0, rSELF
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, rINST
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
+    cmp     r0, #0
+    bne     MterpOnStackReplacement
+    b       .L_resume_forward_branch
+
+.L_add_batch:
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+    ldr     r0, [rFP, #OFF_FP_METHOD]
+    mov     r2, rSELF
+    bl      MterpAddHotnessBatch        @ (method, shadow_frame, self)
+    mov     rPROFILE, r0                @ restore new hotness countdown to rPROFILE
+    b       .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    mov     r0, rSELF
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, #2
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
+    cmp     r0, #0
+    bne     MterpOnStackReplacement
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    mov r0, rSELF
+    add r1, rFP, #OFF_FP_SHADOWFRAME
+    mov r2, rINST
+    bl MterpLogOSR
+#endif
+    mov r0, #1                          @ Signal normal return
+    b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  r0, rSELF
+    add  r1, rFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogFallback
+#endif
+MterpCommonFallback:
+    mov     r0, #0                                  @ signal retry with reference interpreter.
+    b       MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR.  Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    mov     r0, #1                                  @ signal return to caller.
+    b MterpDone
+MterpReturn:
+    ldr     r2, [rFP, #OFF_FP_RESULT_REGISTER]
+    str     r0, [r2]
+    str     r1, [r2, #4]
+    mov     r0, #1                                  @ signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    cmp     rPROFILE, #0
+    bgt     MterpProfileActive                      @ if > 0, we may have some counts to report.
+    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
+
+MterpProfileActive:
+    mov     rINST, r0                               @ stash return value
+    /* Report cached hotness counts */
+    ldr     r0, [rFP, #OFF_FP_METHOD]
+    add     r1, rFP, #OFF_FP_SHADOWFRAME
+    mov     r2, rSELF
+    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+    bl      MterpAddHotnessBatch                    @ (method, shadow_frame, self)
+    mov     r0, rINST                               @ restore return value
+    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
+
+    .cfi_endproc
+    END MterpHelpers
+
+%def instruction_end():
+
+    .type artMterpAsmInstructionEnd, #object
+    .hidden artMterpAsmInstructionEnd
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_start():
+
+    .type artMterpAsmInstructionStart, #object
+    .hidden artMterpAsmInstructionStart
+    .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+%def opcode_start():
+    ENTRY Mterp_${opcode}
+%def opcode_end():
+    END Mterp_${opcode}
+%def helper_start(name):
+    ENTRY ${name}
+%def helper_end(name):
+    END ${name}
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
new file mode 100644
index 0000000..7736383
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -0,0 +1,322 @@
+%def field(helper=""):
+    /*
+     * General field read / write (iget-* iput-* sget-* sput-*).
+     */
+    .extern $helper
+    mov      r0, rPC                       @ arg0: Instruction* inst
+    mov      r1, rINST                     @ arg1: uint16_t inst_data
+    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
+    mov      r3, rSELF                     @ arg3: Thread* self
+    PREFETCH_INST 2                        @ prefetch next opcode
+    bl       $helper
+    cmp      r0, #0
+    beq      MterpPossibleException
+    ADVANCE 2
+    GET_INST_OPCODE ip                     @ extract opcode from rINST
+    GOTO_OPCODE ip                         @ jump to next instruction
+
+%def op_check_cast():
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC
+    FETCH    r0, 1                      @ r0<- BBBB
+    mov      r1, rINST, lsr #8          @ r1<- AA
+    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
+    ldr      r2, [rFP, #OFF_FP_METHOD]  @ r2<- method
+    mov      r3, rSELF                  @ r3<- self
+    bl       MterpCheckCast             @ (index, &obj, method, self)
+    PREFETCH_INST 2
+    cmp      r0, #0
+    bne      MterpPossibleException
+    ADVANCE  2
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
+   @ Fast-path which gets the field offset from thread-local cache.
+   add      r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET       @ cache address
+   ubfx     r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2  @ entry index
+   add      r0, r0, r1, lsl #3            @ entry address within the cache
+   ldrd     r0, r1, [r0]                  @ entry key (pc) and value (offset)
+   mov      r2, rINST, lsr #12            @ B
+   GET_VREG r2, r2                        @ object we're operating on
+   cmp      r0, rPC
+%  slow_path_label = add_helper(lambda: field(helper))
+   bne      ${slow_path_label}            @ cache miss
+   cmp      r2, #0
+   beq      common_errNullObject          @ null object
+%  if is_wide:
+     ldrd     r0, r1, [r2, r1]            @ r0,r1 <- obj.field
+%  else:
+     ${load}  r0, [r2, r1]                @ r0 <- obj.field
+%  #endif
+%  if is_object:
+     UNPOISON_HEAP_REF r0
+#if defined(USE_READ_BARRIER)
+# if defined(USE_BAKER_READ_BARRIER)
+     ldr    ip, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
+     cmp    ip, #0
+     bne    .L_${opcode}_mark             @ GC is active
+.L_${opcode}_marked:
+# else
+     bl artReadBarrierMark                @ r0 <- artReadBarrierMark(r0)
+# endif
+#endif
+%  #endif
+   ubfx     r2, rINST, #8, #4             @ A
+   FETCH_ADVANCE_INST 2                   @ advance rPC, load rINST
+%  if is_object:
+     SET_VREG_OBJECT r0, r2               @ fp[A]<- r0
+%  elif is_wide:
+     SET_VREG_WIDE r0, r1, r2             @ fp[A]<- r0, r1
+%  else:
+     SET_VREG r0, r2                      @ fp[A]<- r0
+%  #endif
+   GET_INST_OPCODE ip                     @ extract opcode from rINST
+   GOTO_OPCODE ip                         @ jump to next instruction
+%  if is_object:
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+.L_${opcode}_mark:
+     bl artReadBarrierMark                @ r0 <- artReadBarrierMark(r0)
+     b .L_${opcode}_marked
+#endif
+%  #endif
+
+%def op_iget_boolean():
+%  op_iget(load="ldrb", helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="ldrb")
+
+%def op_iget_byte():
+%  op_iget(load="ldrsb", helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="ldrsb")
+
+%def op_iget_char():
+%  op_iget(load="ldrh", helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="ldrh")
+
+%def op_iget_object():
+%  op_iget(is_object=True, helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    FETCH r1, 1                         @ r1<- field byte offset
+    EXPORT_PC
+    GET_VREG r0, r2                     @ r0<- object we're operating on
+    bl      artIGetObjectFromMterp      @ (obj, offset)
+    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+    ubfx    r2, rINST, #8, #4           @ r2<- A
+    PREFETCH_INST 2
+    cmp     r3, #0
+    bne     MterpPossibleException      @ bail out
+    SET_VREG_OBJECT r0, r2              @ fp[A]<- r0
+    ADVANCE 2                           @ advance rPC
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iget_quick(load="ldr"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    FETCH r1, 1                         @ r1<- field byte offset
+    GET_VREG r3, r2                     @ r3<- object we're operating on
+    ubfx    r2, rINST, #8, #4           @ r2<- A
+    cmp     r3, #0                      @ check object for null
+    beq     common_errNullObject        @ object was null
+    $load   r0, [r3, r1]                @ r0<- obj.field
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    SET_VREG r0, r2                     @ fp[A]<- r0
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iget_short():
+%  op_iget(load="ldrsh", helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="ldrsh")
+
+%def op_iget_wide():
+%  op_iget(is_wide=True, helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+    /* iget-wide-quick vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    FETCH ip, 1                         @ ip<- field byte offset
+    GET_VREG r3, r2                     @ r3<- object we're operating on
+    ubfx    r2, rINST, #8, #4           @ r2<- A
+    cmp     r3, #0                      @ check object for null
+    beq     common_errNullObject        @ object was null
+    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    VREG_INDEX_TO_ADDR r3, r2           @ r3<- &fp[A]
+    CLEAR_SHADOW_PAIR r2, ip, lr        @ Zero out the shadow regs
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ fp[A]<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_instance_of():
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC
+    FETCH     r0, 1                     @ r0<- CCCC
+    mov       r1, rINST, lsr #12        @ r1<- B
+    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
+    ldr       r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+    mov       r3, rSELF                 @ r3<- self
+    bl        MterpInstanceOf           @ (index, &obj, method, self)
+    ldr       r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+    ubfx      r9, rINST, #8, #4         @ r9<- A
+    PREFETCH_INST 2
+    cmp       r1, #0                    @ exception pending?
+    bne       MterpException
+    ADVANCE 2                           @ advance rPC
+    SET_VREG r0, r9                     @ vA<- r0
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iput(helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(store="strb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(store="strb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(store="strh")
+
+%def op_iput_object():
+%  op_iput(helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    EXPORT_PC
+    add     r0, rFP, #OFF_FP_SHADOWFRAME
+    mov     r1, rPC
+    mov     r2, rINST
+    bl      MterpIputObjectQuick
+    cmp     r0, #0
+    beq     MterpException
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iput_quick(store="str"):
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    FETCH r1, 1                         @ r1<- field byte offset
+    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+    ubfx    r2, rINST, #8, #4           @ r2<- A
+    cmp     r3, #0                      @ check object for null
+    beq     common_errNullObject        @ object was null
+    GET_VREG r0, r2                     @ r0<- fp[A]
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    $store     r0, [r3, r1]             @ obj.field<- r0
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(store="strh")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    FETCH r3, 1                         @ r3<- field byte offset
+    GET_VREG r2, r2                     @ r2<- fp[B], the object pointer
+    ubfx    r0, rINST, #8, #4           @ r0<- A
+    cmp     r2, #0                      @ check object for null
+    beq     common_errNullObject        @ object was null
+    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[A]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r0    @ r0/r1<- fp[A]/fp[A+1]
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    strd    r0, [r2, r3]                @ obj.field<- r0/r1
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_new_instance():
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC
+    add     r0, rFP, #OFF_FP_SHADOWFRAME
+    mov     r1, rSELF
+    mov     r2, rINST
+    bl      MterpNewInstance           @ (shadow_frame, self, inst_data)
+    cmp     r0, #0
+    beq     MterpPossibleException
+    FETCH_ADVANCE_INST 2               @ advance rPC, load rINST
+    GET_INST_OPCODE ip                 @ extract opcode from rINST
+    GOTO_OPCODE ip                     @ jump to next instruction
+
+%def op_sget(helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/arm/op_add_double.S b/runtime/interpreter/mterp/arm/op_add_double.S
deleted file mode 100644
index 9332bf2..0000000
--- a/runtime/interpreter/mterp/arm/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"faddd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_double_2addr.S b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
deleted file mode 100644
index 3242c53..0000000
--- a/runtime/interpreter/mterp/arm/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"faddd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float.S b/runtime/interpreter/mterp/arm/op_add_float.S
deleted file mode 100644
index afb7967..0000000
--- a/runtime/interpreter/mterp/arm/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fadds   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float_2addr.S b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
deleted file mode 100644
index 0067b6a..0000000
--- a/runtime/interpreter/mterp/arm/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fadds   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int.S b/runtime/interpreter/mterp/arm/op_add_int.S
deleted file mode 100644
index 1dcae7e..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"add     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_2addr.S b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
deleted file mode 100644
index 9ea98f1..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"add     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit16.S b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
deleted file mode 100644
index 5763ab8..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"add     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit8.S b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
deleted file mode 100644
index 035510d..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"add     r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long.S b/runtime/interpreter/mterp/arm/op_add_long.S
deleted file mode 100644
index 093223e..0000000
--- a/runtime/interpreter/mterp/arm/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"adds    r0, r0, r2", "instr":"adc     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long_2addr.S b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
deleted file mode 100644
index c11e0af..0000000
--- a/runtime/interpreter/mterp/arm/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"adds    r0, r0, r2", "instr":"adc     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aget.S b/runtime/interpreter/mterp/arm/op_aget.S
deleted file mode 100644
index 11f7079..0000000
--- a/runtime/interpreter/mterp/arm/op_aget.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $load   r2, [r0, #$data_offset]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_boolean.S b/runtime/interpreter/mterp/arm/op_aget_boolean.S
deleted file mode 100644
index 8f678dc..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_byte.S b/runtime/interpreter/mterp/arm/op_aget_byte.S
deleted file mode 100644
index a304650..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_char.S b/runtime/interpreter/mterp/arm/op_aget_char.S
deleted file mode 100644
index 4908306..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_object.S b/runtime/interpreter/mterp/arm/op_aget_object.S
deleted file mode 100644
index 4e0aab5..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_object.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    EXPORT_PC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    bl       artAGetObjectFromMterp     @ (array, index)
-    ldr      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    PREFETCH_INST 2
-    cmp      r1, #0
-    bne      MterpException
-    SET_VREG_OBJECT r0, r9
-    ADVANCE 2
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_short.S b/runtime/interpreter/mterp/arm/op_aget_short.S
deleted file mode 100644
index b71e659..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
deleted file mode 100644
index 66ec950..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ /dev/null
@@ -1,25 +0,0 @@
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    ldrd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_and_int.S b/runtime/interpreter/mterp/arm/op_and_int.S
deleted file mode 100644
index 7c16d37..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"and     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_2addr.S b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
deleted file mode 100644
index 0fbab02..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"and     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit16.S b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
deleted file mode 100644
index 541e9b7..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"and     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit8.S b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
deleted file mode 100644
index af746b5..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"and     r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long.S b/runtime/interpreter/mterp/arm/op_and_long.S
deleted file mode 100644
index 4ad5158..0000000
--- a/runtime/interpreter/mterp/arm/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"and     r0, r0, r2", "instr":"and     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long_2addr.S b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
deleted file mode 100644
index e23ea44..0000000
--- a/runtime/interpreter/mterp/arm/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"and     r0, r0, r2", "instr":"and     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aput.S b/runtime/interpreter/mterp/arm/op_aput.S
deleted file mode 100644
index a511fa5..0000000
--- a/runtime/interpreter/mterp/arm/op_aput.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    $store  r2, [r0, #$data_offset]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_boolean.S b/runtime/interpreter/mterp/arm/op_aput_boolean.S
deleted file mode 100644
index e86663f..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_byte.S b/runtime/interpreter/mterp/arm/op_aput_byte.S
deleted file mode 100644
index 83694b7..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_char.S b/runtime/interpreter/mterp/arm/op_aput_char.S
deleted file mode 100644
index 3551cac..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_object.S b/runtime/interpreter/mterp/arm/op_aput_object.S
deleted file mode 100644
index c539916..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    bl      MterpAputObject
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_short.S b/runtime/interpreter/mterp/arm/op_aput_short.S
deleted file mode 100644
index 0a0590e..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S
deleted file mode 100644
index 0057507..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_wide.S
+++ /dev/null
@@ -1,24 +0,0 @@
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_array_length.S b/runtime/interpreter/mterp/arm/op_array_length.S
deleted file mode 100644
index 43b1682..0000000
--- a/runtime/interpreter/mterp/arm/op_array_length.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Return the length of an array.
-     */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    GET_VREG r0, r1                     @ r0<- vB (object ref)
-    cmp     r0, #0                      @ is object null?
-    beq     common_errNullObject        @ yup, fail
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- array length
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r3, r2                     @ vB<- length
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_check_cast.S b/runtime/interpreter/mterp/arm/op_check_cast.S
deleted file mode 100644
index 24eba45..0000000
--- a/runtime/interpreter/mterp/arm/op_check_cast.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    FETCH    r0, 1                      @ r0<- BBBB
-    mov      r1, rINST, lsr #8          @ r1<- AA
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
-    ldr      r2, [rFP, #OFF_FP_METHOD]  @ r2<- method
-    mov      r3, rSELF                  @ r3<- self
-    bl       MterpCheckCast             @ (index, &obj, method, self)
-    PREFETCH_INST 2
-    cmp      r0, #0
-    bne      MterpPossibleException
-    ADVANCE  2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S
deleted file mode 100644
index 6626ff0..0000000
--- a/runtime/interpreter/mterp/arm/op_cmp_long.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
-     * register based on the results of the comparison.
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    cmp     r0, r2
-    sbcs    ip, r1, r3                  @ Sets correct CCs for checking LT (but not EQ/NE)
-    mov     ip, #0
-    mvnlt   ip, #0                      @ -1
-    cmpeq   r0, r2                      @ For correct EQ/NE, we may need to repeat the first CMP
-    orrne   ip, #1
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG ip, r9                     @ vAA<- ip
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_double.S b/runtime/interpreter/mterp/arm/op_cmpg_double.S
deleted file mode 100644
index 602a4b1..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpg_double.S
+++ /dev/null
@@ -1,34 +0,0 @@
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else {
-     *         return 1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, #1                      @ r0<- 1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    mvnmi   r0, #0                      @ (less than) r1<- -1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_float.S b/runtime/interpreter/mterp/arm/op_cmpg_float.S
deleted file mode 100644
index 965091f..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpg_float.S
+++ /dev/null
@@ -1,34 +0,0 @@
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else {
-     *         return 1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    flds    s0, [r2]                    @ s0<- vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    vcmpe.f32 s0, s1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, #1                      @ r0<- 1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    mvnmi   r0, #0                      @ (less than) r1<- -1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_double.S b/runtime/interpreter/mterp/arm/op_cmpl_double.S
deleted file mode 100644
index 8a5e509..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpl_double.S
+++ /dev/null
@@ -1,34 +0,0 @@
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else {
-     *         return -1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mvn     r0, #0                      @ r0<- -1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    movgt   r0, #1                      @ (greater than) r1<- 1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_float.S b/runtime/interpreter/mterp/arm/op_cmpl_float.S
deleted file mode 100644
index 9df0c2c..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpl_float.S
+++ /dev/null
@@ -1,34 +0,0 @@
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else {
-     *         return -1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    flds    s0, [r2]                    @ s0<- vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    vcmpe.f32  s0, s1                   @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mvn     r0, #0                      @ r0<- -1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    movgt   r0, #1                      @ (greater than) r1<- 1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S
deleted file mode 100644
index 39890a0..0000000
--- a/runtime/interpreter/mterp/arm/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const vAA, #+BBBBbbbb */
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r1, 2                         @ r1<- BBBB (high)
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S
deleted file mode 100644
index a30cf3a..0000000
--- a/runtime/interpreter/mterp/arm/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/16 vAA, #+BBBB */
-    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S
deleted file mode 100644
index c97b0e9..0000000
--- a/runtime/interpreter/mterp/arm/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/4 vA, #+B */
-    sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    SET_VREG r1, r0                     @ fp[A]<- r1
-    GOTO_OPCODE ip                      @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_class.S b/runtime/interpreter/mterp/arm/op_const_class.S
deleted file mode 100644
index ff5c98c..0000000
--- a/runtime/interpreter/mterp/arm/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S
deleted file mode 100644
index 536276d..0000000
--- a/runtime/interpreter/mterp/arm/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const/high16 vAA, #+BBBB0000 */
-    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r0, r0, lsl #16             @ r0<- BBBB0000
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_method_handle.S b/runtime/interpreter/mterp/arm/op_const_method_handle.S
deleted file mode 100644
index 71f0550..0000000
--- a/runtime/interpreter/mterp/arm/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm/op_const_method_type.S b/runtime/interpreter/mterp/arm/op_const_method_type.S
deleted file mode 100644
index 2cccdaf..0000000
--- a/runtime/interpreter/mterp/arm/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm/op_const_string.S b/runtime/interpreter/mterp/arm/op_const_string.S
deleted file mode 100644
index 75ec34f..0000000
--- a/runtime/interpreter/mterp/arm/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
deleted file mode 100644
index 1255c07..0000000
--- a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r2, 2                         @ r2<- BBBB (high)
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    orr     r0, r0, r2, lsl #16         @ r1<- BBBBbbbb
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstString            @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     @ advance rPC
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 3                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
deleted file mode 100644
index 8310a4c..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r1, 2                         @ r1<- BBBB (low middle)
-    FETCH r2, 3                         @ r2<- hhhh (high middle)
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
-    FETCH r3, 4                         @ r3<- HHHH (high)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
-    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
deleted file mode 100644
index 28abb51..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /* const-wide/16 vAA, #+BBBB */
-    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r1, r0, asr #31             @ r1<- ssssssss
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
deleted file mode 100644
index c10bb04..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_32.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    FETCH r0, 1                         @ r0<- 0000bbbb (low)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
-    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    mov     r1, r0, asr #31             @ r1<- ssssssss
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
deleted file mode 100644
index d7e38ec..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    FETCH r1, 1                         @ r1<- 0000BBBB (zero-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r0, #0                      @ r0<- 00000000
-    mov     r1, r1, lsl #16             @ r1<- BBBB0000
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_div_double.S b/runtime/interpreter/mterp/arm/op_div_double.S
deleted file mode 100644
index 5147550..0000000
--- a/runtime/interpreter/mterp/arm/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fdivd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_double_2addr.S b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
deleted file mode 100644
index b812f17..0000000
--- a/runtime/interpreter/mterp/arm/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fdivd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float.S b/runtime/interpreter/mterp/arm/op_div_float.S
deleted file mode 100644
index 0f24d11..0000000
--- a/runtime/interpreter/mterp/arm/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fdivs   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float_2addr.S b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
deleted file mode 100644
index a1dbf01..0000000
--- a/runtime/interpreter/mterp/arm/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fdivs   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_int.S b/runtime/interpreter/mterp/arm/op_div_int.S
deleted file mode 100644
index 251064b..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int
-     *
-     */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl    __aeabi_idiv                  @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_int_2addr.S b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
deleted file mode 100644
index 9be4cd8..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/2addr
-     *
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit16.S b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
deleted file mode 100644
index d9bc7d6..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_lit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/lit16
-     *
-     */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit8.S b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
deleted file mode 100644
index 5d2dbd3..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_lit8.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/lit8
-     *
-     */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl   __aeabi_idiv                   @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_long.S b/runtime/interpreter/mterp/arm/op_div_long.S
deleted file mode 100644
index 0f21a84..0000000
--- a/runtime/interpreter/mterp/arm/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"instr":"bl      __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_long_2addr.S b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
deleted file mode 100644
index e172b29..0000000
--- a/runtime/interpreter/mterp/arm/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"instr":"bl      __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_float.S b/runtime/interpreter/mterp/arm/op_double_to_float.S
deleted file mode 100644
index 98fdfbc..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopNarrower.S" {"instr":"vcvt.f32.f64  s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_int.S b/runtime/interpreter/mterp/arm/op_double_to_int.S
deleted file mode 100644
index aa035de..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopNarrower.S" {"instr":"ftosizd  s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_long.S b/runtime/interpreter/mterp/arm/op_double_to_long.S
deleted file mode 100644
index 19ff723..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_long.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%include "arm/unopWide.S" {"instr":"bl      d2l_doconv"}
-
-%break
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
-    ubfx    r2, r1, #20, #11            @ grab the exponent
-    movw    r3, #0x43e
-    cmp     r2, r3                      @ MINLONG < x > MAXLONG?
-    bhs     d2l_special_cases
-    b       __aeabi_d2lz                @ tail call to convert double to long
-d2l_special_cases:
-    movw    r3, #0x7ff
-    cmp     r2, r3
-    beq     d2l_maybeNaN                @ NaN?
-d2l_notNaN:
-    adds    r1, r1, r1                  @ sign bit to carry
-    mov     r0, #0xffffffff             @ assume maxlong for lsw
-    mov     r1, #0x7fffffff             @ assume maxlong for msw
-    adc     r0, r0, #0
-    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
-    bx      lr                          @ return
-d2l_maybeNaN:
-    orrs    r3, r0, r1, lsl #12
-    beq     d2l_notNaN                  @ if fraction is non-zero, it's a NaN
-    mov     r0, #0
-    mov     r1, #0
-    bx      lr                          @ return 0 for NaN
diff --git a/runtime/interpreter/mterp/arm/op_fill_array_data.S b/runtime/interpreter/mterp/arm/op_fill_array_data.S
deleted file mode 100644
index e1ca85c..0000000
--- a/runtime/interpreter/mterp/arm/op_fill_array_data.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    FETCH r0, 1                         @ r0<- bbbb (lo)
-    FETCH r1, 2                         @ r1<- BBBB (hi)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
-    GET_VREG r0, r3                     @ r0<- vAA (array object)
-    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
-    bl      MterpFillArrayData          @ (obj, payload)
-    cmp     r0, #0                      @ 0 means an exception is thrown
-    beq     MterpPossibleException      @ exception?
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array.S b/runtime/interpreter/mterp/arm/op_filled_new_array.S
deleted file mode 100644
index 1075f0c..0000000
--- a/runtime/interpreter/mterp/arm/op_filled_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rSELF
-    bl      $helper
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
deleted file mode 100644
index 16567af..0000000
--- a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm/op_float_to_double.S b/runtime/interpreter/mterp/arm/op_float_to_double.S
deleted file mode 100644
index b1e12bd..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopWider.S" {"instr":"vcvt.f64.f32  d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_int.S b/runtime/interpreter/mterp/arm/op_float_to_int.S
deleted file mode 100644
index aab8716..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funop.S" {"instr":"ftosizs s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_long.S b/runtime/interpreter/mterp/arm/op_float_to_long.S
deleted file mode 100644
index 42bd98d..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
-%include "arm/unopWider.S" {"instr":"bl      f2l_doconv"}
-
-%break
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
-    ubfx    r2, r0, #23, #8             @ grab the exponent
-    cmp     r2, #0xbe                   @ MININT < x > MAXINT?
-    bhs     f2l_special_cases
-    b       __aeabi_f2lz                @ tail call to convert float to long
-f2l_special_cases:
-    cmp     r2, #0xff                   @ NaN or infinity?
-    beq     f2l_maybeNaN
-f2l_notNaN:
-    adds    r0, r0, r0                  @ sign bit to carry
-    mov     r0, #0xffffffff             @ assume maxlong for lsw
-    mov     r1, #0x7fffffff             @ assume maxlong for msw
-    adc     r0, r0, #0
-    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
-    bx      lr                          @ return
-f2l_maybeNaN:
-    lsls    r3, r0, #9
-    beq     f2l_notNaN                  @ if fraction is non-zero, it's a NaN
-    mov     r0, #0
-    mov     r1, #0
-    bx      lr                          @ return 0 for NaN
diff --git a/runtime/interpreter/mterp/arm/op_goto.S b/runtime/interpreter/mterp/arm/op_goto.S
deleted file mode 100644
index aa42dfd..0000000
--- a/runtime/interpreter/mterp/arm/op_goto.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm/op_goto_16.S b/runtime/interpreter/mterp/arm/op_goto_16.S
deleted file mode 100644
index 12a6bc0..0000000
--- a/runtime/interpreter/mterp/arm/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S rINST, 1                    @ rINST<- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm/op_goto_32.S b/runtime/interpreter/mterp/arm/op_goto_32.S
deleted file mode 100644
index 7325a1c..0000000
--- a/runtime/interpreter/mterp/arm/op_goto_32.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".  Because
-     * we need the V bit set, we'll use an adds to convert from Dalvik
-     * offset to byte offset.
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH r0, 1                         @ r0<- aaaa (lo)
-    FETCH r3, 2                         @ r1<- AAAA (hi)
-    orrs    rINST, r0, r3, lsl #16      @ rINST<- AAAAaaaa
-    b       MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/arm/op_if_eq.S b/runtime/interpreter/mterp/arm/op_if_eq.S
deleted file mode 100644
index b8b6a6e..0000000
--- a/runtime/interpreter/mterp/arm/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_eqz.S b/runtime/interpreter/mterp/arm/op_if_eqz.S
deleted file mode 100644
index 7012f61..0000000
--- a/runtime/interpreter/mterp/arm/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ge.S b/runtime/interpreter/mterp/arm/op_if_ge.S
deleted file mode 100644
index eb29e63..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gez.S b/runtime/interpreter/mterp/arm/op_if_gez.S
deleted file mode 100644
index d9da374..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gt.S b/runtime/interpreter/mterp/arm/op_if_gt.S
deleted file mode 100644
index a35eab8..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gtz.S b/runtime/interpreter/mterp/arm/op_if_gtz.S
deleted file mode 100644
index 4ef4d8e..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_le.S b/runtime/interpreter/mterp/arm/op_if_le.S
deleted file mode 100644
index c7c31bc..0000000
--- a/runtime/interpreter/mterp/arm/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lez.S b/runtime/interpreter/mterp/arm/op_if_lez.S
deleted file mode 100644
index 9fbf6c9..0000000
--- a/runtime/interpreter/mterp/arm/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lt.S b/runtime/interpreter/mterp/arm/op_if_lt.S
deleted file mode 100644
index 9469fbb..0000000
--- a/runtime/interpreter/mterp/arm/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ltz.S b/runtime/interpreter/mterp/arm/op_if_ltz.S
deleted file mode 100644
index a4fc1b8..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ne.S b/runtime/interpreter/mterp/arm/op_if_ne.S
deleted file mode 100644
index c945331..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_nez.S b/runtime/interpreter/mterp/arm/op_if_nez.S
deleted file mode 100644
index 2d81fda..0000000
--- a/runtime/interpreter/mterp/arm/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
deleted file mode 100644
index 1fa32fa..0000000
--- a/runtime/interpreter/mterp/arm/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "arm/field.S" { }
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
deleted file mode 100644
index f23cb3a..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
deleted file mode 100644
index 0ae4843..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
deleted file mode 100644
index 9c4f37c..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
deleted file mode 100644
index e1b3083..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
deleted file mode 100644
index 80c4227..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char_quick.S b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
deleted file mode 100644
index b44d8f1..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
deleted file mode 100644
index e30b129..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object_quick.S b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
deleted file mode 100644
index 16cb118..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_object_quick.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    EXPORT_PC
-    GET_VREG r0, r2                     @ r0<- object we're operating on
-    bl      artIGetObjectFromMterp      @ (obj, offset)
-    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    PREFETCH_INST 2
-    cmp     r3, #0
-    bne     MterpPossibleException      @ bail out
-    SET_VREG_OBJECT r0, r2              @ fp[A]<- r0
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_quick.S b/runtime/interpreter/mterp/arm/op_iget_quick.S
deleted file mode 100644
index 0eaf364..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"ldr" }
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    $load   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
deleted file mode 100644
index dd6bc99..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short_quick.S b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
deleted file mode 100644
index 1831b99..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
deleted file mode 100644
index ede21eb..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
deleted file mode 100644
index 5a7177d..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH ip, 1                         @ ip<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    VREG_INDEX_TO_ADDR r3, r2           @ r3<- &fp[A]
-    CLEAR_SHADOW_PAIR r2, ip, lr        @ Zero out the shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S
deleted file mode 100644
index 019929e..0000000
--- a/runtime/interpreter/mterp/arm/op_instance_of.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    FETCH     r0, 1                     @ r0<- CCCC
-    mov       r1, rINST, lsr #12        @ r1<- B
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
-    ldr       r2, [rFP, #OFF_FP_METHOD] @ r2<- method
-    mov       r3, rSELF                 @ r3<- self
-    bl        MterpInstanceOf           @ (index, &obj, method, self)
-    ldr       r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx      r9, rINST, #8, #4         @ r9<- A
-    PREFETCH_INST 2
-    cmp       r1, #0                    @ exception pending?
-    bne       MterpException
-    ADVANCE 2                           @ advance rPC
-    SET_VREG r0, r9                     @ vA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_int_to_byte.S b/runtime/interpreter/mterp/arm/op_int_to_byte.S
deleted file mode 100644
index 059d5c2..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"sxtb    r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_char.S b/runtime/interpreter/mterp/arm/op_int_to_char.S
deleted file mode 100644
index 83a0c19..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"uxth    r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_double.S b/runtime/interpreter/mterp/arm/op_int_to_double.S
deleted file mode 100644
index 810c2e4..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopWider.S" {"instr":"fsitod  d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_float.S b/runtime/interpreter/mterp/arm/op_int_to_float.S
deleted file mode 100644
index f41654c..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funop.S" {"instr":"fsitos  s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_long.S b/runtime/interpreter/mterp/arm/op_int_to_long.S
deleted file mode 100644
index b5aed8e..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWider.S" {"instr":"mov     r1, r0, asr #31"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_short.S b/runtime/interpreter/mterp/arm/op_int_to_short.S
deleted file mode 100644
index 717bd96..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"sxth    r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom.S b/runtime/interpreter/mterp/arm/op_invoke_custom.S
deleted file mode 100644
index 2af875c..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_custom.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeCustom" }
-    /*
-     * Handle an invoke-custom invocation.
-     *
-     * for: invoke-custom, invoke-custom/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
deleted file mode 100644
index 32575c4..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct.S b/runtime/interpreter/mterp/arm/op_invoke_direct.S
deleted file mode 100644
index 1edf221..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
deleted file mode 100644
index 3097b8e..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface.S b/runtime/interpreter/mterp/arm/op_invoke_interface.S
deleted file mode 100644
index f6d565b..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeInterface" }
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
deleted file mode 100644
index c8443b0..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
deleted file mode 100644
index 816a7ae..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
deleted file mode 100644
index 2541c27..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static.S b/runtime/interpreter/mterp/arm/op_invoke_static.S
deleted file mode 100644
index c3cefcf..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static_range.S b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
deleted file mode 100644
index dd60d7b..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super.S b/runtime/interpreter/mterp/arm/op_invoke_super.S
deleted file mode 100644
index 92ef2a4..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeSuper" }
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super_range.S b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
deleted file mode 100644
index 9e4fb1c..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual.S b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
deleted file mode 100644
index 5b893ff..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtual" }
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
deleted file mode 100644
index 020e8b8..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
deleted file mode 100644
index 2b42a78..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 42f2ded..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm/op_iput.S b/runtime/interpreter/mterp/arm/op_iput.S
deleted file mode 100644
index 6201d80..0000000
--- a/runtime/interpreter/mterp/arm/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "arm/field.S" { }
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean.S b/runtime/interpreter/mterp/arm/op_iput_boolean.S
deleted file mode 100644
index 57edadd..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
deleted file mode 100644
index f0a2777..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte.S b/runtime/interpreter/mterp/arm/op_iput_byte.S
deleted file mode 100644
index ab283b9..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
deleted file mode 100644
index f0a2777..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char.S b/runtime/interpreter/mterp/arm/op_iput_char.S
deleted file mode 100644
index 0fe5d96..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char_quick.S b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
deleted file mode 100644
index 5212fc3..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object.S b/runtime/interpreter/mterp/arm/op_iput_object.S
deleted file mode 100644
index 1003d10..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object_quick.S b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
deleted file mode 100644
index 876b3da..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_object_quick.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    bl      MterpIputObjectQuick
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_quick.S b/runtime/interpreter/mterp/arm/op_iput_quick.S
deleted file mode 100644
index 98c8150..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "store":"str" }
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    $store     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_short.S b/runtime/interpreter/mterp/arm/op_iput_short.S
deleted file mode 100644
index cc98363..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_short_quick.S b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
deleted file mode 100644
index 5212fc3..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
deleted file mode 100644
index f2845ad..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
deleted file mode 100644
index 88e6ea1..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r3, 1                         @ r3<- field byte offset
-    GET_VREG r2, r2                     @ r2<- fp[B], the object pointer
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    cmp     r2, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[A]
-    ldmia   r0, {r0-r1}                 @ r0/r1<- fp[A]/fp[A+1]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strd    r0, [r2, r3]                @ obj.field<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S
deleted file mode 100644
index cac12d4..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_double.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {}
-    /*
-     * Specialised 64-bit floating point operation.
-     *
-     * Note: The result will be returned in d2.
-     *
-     * For: long-to-double
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    vldr    d0, [r3]                    @ d0<- vAA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    vcvt.f64.s32    d1, s1              @ d1<- (double)(vAAh)
-    vcvt.f64.u32    d2, s0              @ d2<- (double)(vAAl)
-    vldr            d3, constval$opcode
-    vmla.f64        d2, d1, d3          @ d2<- vAAh*2^32 + vAAl
-
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    vstr.64 d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-    /* literal pool helper */
-constval${opcode}:
-    .8byte          0x41f0000000000000
diff --git a/runtime/interpreter/mterp/arm/op_long_to_float.S b/runtime/interpreter/mterp/arm/op_long_to_float.S
deleted file mode 100644
index efa5a66..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopNarrower.S" {"instr":"bl      __aeabi_l2f"}
diff --git a/runtime/interpreter/mterp/arm/op_long_to_int.S b/runtime/interpreter/mterp/arm/op_long_to_int.S
deleted file mode 100644
index 3e91f23..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "arm/op_move.S"
diff --git a/runtime/interpreter/mterp/arm/op_monitor_enter.S b/runtime/interpreter/mterp/arm/op_monitor_enter.S
deleted file mode 100644
index 3c34f75..0000000
--- a/runtime/interpreter/mterp/arm/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r0, r2                      @ r0<- vAA (object)
-    mov      r1, rSELF                   @ r1<- self
-    bl       artLockObjectFromCode
-    cmp      r0, #0
-    bne      MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE ip                   @ extract opcode from rINST
-    GOTO_OPCODE ip                       @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_monitor_exit.S b/runtime/interpreter/mterp/arm/op_monitor_exit.S
deleted file mode 100644
index fc7cef5..0000000
--- a/runtime/interpreter/mterp/arm/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8          @ r2<- AA
-    GET_VREG r0, r2                     @ r0<- vAA (object)
-    mov      r1, rSELF                  @ r0<- self
-    bl       artUnlockObjectFromCode    @ r0<- success for unlock(self, obj)
-    cmp     r0, #0                      @ failed?
-    bne     MterpException
-    FETCH_ADVANCE_INST 1                @ before throw: advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move.S b/runtime/interpreter/mterp/arm/op_move.S
deleted file mode 100644
index dfecc24..0000000
--- a/runtime/interpreter/mterp/arm/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[B]
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[A]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_16.S b/runtime/interpreter/mterp/arm/op_move_16.S
deleted file mode 100644
index 78138a2..0000000
--- a/runtime/interpreter/mterp/arm/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH r1, 2                         @ r1<- BBBB
-    FETCH r0, 1                         @ r0<- AAAA
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT r2, r0              @ fp[AAAA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AAAA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_exception.S b/runtime/interpreter/mterp/arm/op_move_exception.S
deleted file mode 100644
index 0242e26..0000000
--- a/runtime/interpreter/mterp/arm/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-exception vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    mov     r1, #0                      @ r1<- 0
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    SET_VREG_OBJECT r3, r2              @ fp[AA]<- exception obj
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    str     r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ clear exception
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_from16.S b/runtime/interpreter/mterp/arm/op_move_from16.S
deleted file mode 100644
index 3e79417..0000000
--- a/runtime/interpreter/mterp/arm/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH r1, 1                         @ r1<- BBBB
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT r2, r0              @ fp[AA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_object.S b/runtime/interpreter/mterp/arm/op_move_object.S
deleted file mode 100644
index 16de57b..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_16.S b/runtime/interpreter/mterp/arm/op_move_object_16.S
deleted file mode 100644
index 2534300..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_from16.S b/runtime/interpreter/mterp/arm/op_move_object_from16.S
deleted file mode 100644
index 9e0cf02..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result.S b/runtime/interpreter/mterp/arm/op_move_result.S
deleted file mode 100644
index f2586a0..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JType.
-    ldr     r0, [r0]                    @ r0 <- result.i.
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT r0, r2, r1          @ fp[AA]<- r0
-    .else
-    SET_VREG r0, r2                     @ fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_object.S b/runtime/interpreter/mterp/arm/op_move_result_object.S
deleted file mode 100644
index 643296a..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
deleted file mode 100644
index 87929ea..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result_wide.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /* move-result-wide vAA */
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    ldr     r3, [rFP, #OFF_FP_RESULT_REGISTER]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
deleted file mode 100644
index ff353ea..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
deleted file mode 100644
index 9812b66..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH r3, 2                         @ r3<- BBBB
-    FETCH r2, 1                         @ r2<- AAAA
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
-    VREG_INDEX_TO_ADDR lr, r2           @ r2<- &fp[AAAA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
-    stmia   lr, {r0-r1}                 @ fp[AAAA]<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
deleted file mode 100644
index d2cc60c..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH r3, 1                         @ r3<- BBBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_mul_double.S b/runtime/interpreter/mterp/arm/op_mul_double.S
deleted file mode 100644
index 530e85a..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fmuld   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
deleted file mode 100644
index da1abc6..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fmuld   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float.S b/runtime/interpreter/mterp/arm/op_mul_float.S
deleted file mode 100644
index 6a72e6f..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fmuls   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
deleted file mode 100644
index edb5101..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fmuls   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int.S b/runtime/interpreter/mterp/arm/op_mul_int.S
deleted file mode 100644
index d6151d4..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binop.S" {"instr":"mul     r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
deleted file mode 100644
index 66a797d..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binop2addr.S" {"instr":"mul     r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
deleted file mode 100644
index 4e40c43..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binopLit16.S" {"instr":"mul     r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
deleted file mode 100644
index dbafae9..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binopLit8.S" {"instr":"mul     r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
deleted file mode 100644
index 4f55280..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_long.S
+++ /dev/null
@@ -1,37 +0,0 @@
-    /*
-     * Signed 64-bit integer multiply.
-     *
-     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
-     *        WX
-     *      x YZ
-     *  --------
-     *     ZW ZX
-     *  YW YX
-     *
-     * The low word of the result holds ZX, the high word holds
-     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
-     * it doesn't fit in the low 64 bits.
-     *
-     * Unlike most ARM math operations, multiply instructions have
-     * restrictions on using the same register more than once (Rd and Rm
-     * cannot be the same).
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    mul     ip, r2, r1                  @ ip<- ZxW
-    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
-    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    add     r2, r2, lr                  @ r2<- lr + low(ZxW + (YxX))
-    CLEAR_SHADOW_PAIR r0, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[AA]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r0, {r1-r2 }                @ vAA/vAA+1<- r1/r2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
deleted file mode 100644
index 4c1f058..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
+++ /dev/null
@@ -1,24 +0,0 @@
-    /*
-     * Signed 64-bit integer multiply, "/2addr" version.
-     *
-     * See op_mul_long for an explanation.
-     *
-     * We get a little tight on registers, so to avoid looking up &fp[A]
-     * again we stuff it into rINST.
-     */
-    /* mul-long/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR rINST, r9        @ rINST<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   rINST, {r0-r1}              @ r0/r1<- vAA/vAA+1
-    mul     ip, r2, r1                  @ ip<- ZxW
-    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
-    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
-    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    add     r2, r2, lr                  @ r2<- r2 + low(ZxW + (YxX))
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r0, {r1-r2}                 @ vAA/vAA+1<- r1/r2
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_neg_double.S b/runtime/interpreter/mterp/arm/op_neg_double.S
deleted file mode 100644
index 33e609c..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"instr":"add     r1, r1, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_float.S b/runtime/interpreter/mterp/arm/op_neg_float.S
deleted file mode 100644
index 993583f..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"add     r0, r0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_int.S b/runtime/interpreter/mterp/arm/op_neg_int.S
deleted file mode 100644
index ec0b253..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"rsb     r0, r0, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_long.S b/runtime/interpreter/mterp/arm/op_neg_long.S
deleted file mode 100644
index dab2eb4..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"preinstr":"rsbs    r0, r0, #0", "instr":"rsc     r1, r1, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_new_array.S b/runtime/interpreter/mterp/arm/op_new_array.S
deleted file mode 100644
index 8bb792c..0000000
--- a/runtime/interpreter/mterp/arm/op_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    mov     r3, rSELF
-    bl      MterpNewArray
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_new_instance.S b/runtime/interpreter/mterp/arm/op_new_instance.S
deleted file mode 100644
index 95d4be8..0000000
--- a/runtime/interpreter/mterp/arm/op_new_instance.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rSELF
-    mov     r2, rINST
-    bl      MterpNewInstance           @ (shadow_frame, self, inst_data)
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2               @ advance rPC, load rINST
-    GET_INST_OPCODE ip                 @ extract opcode from rINST
-    GOTO_OPCODE ip                     @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_nop.S b/runtime/interpreter/mterp/arm/op_nop.S
deleted file mode 100644
index af0f88f..0000000
--- a/runtime/interpreter/mterp/arm/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
-    FETCH_ADVANCE_INST 1                @ advance to next instr, load rINST
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    GOTO_OPCODE ip                      @ execute it
diff --git a/runtime/interpreter/mterp/arm/op_not_int.S b/runtime/interpreter/mterp/arm/op_not_int.S
deleted file mode 100644
index 816485a..0000000
--- a/runtime/interpreter/mterp/arm/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"mvn     r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_not_long.S b/runtime/interpreter/mterp/arm/op_not_long.S
deleted file mode 100644
index 49a5905..0000000
--- a/runtime/interpreter/mterp/arm/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"preinstr":"mvn     r0, r0", "instr":"mvn     r1, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int.S b/runtime/interpreter/mterp/arm/op_or_int.S
deleted file mode 100644
index b046e8d..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"orr     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_2addr.S b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
deleted file mode 100644
index 493c59f..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"orr     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit16.S b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
deleted file mode 100644
index 0a01db8..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"orr     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit8.S b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
deleted file mode 100644
index 9882bfc..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"orr     r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long.S b/runtime/interpreter/mterp/arm/op_or_long.S
deleted file mode 100644
index 048c45c..0000000
--- a/runtime/interpreter/mterp/arm/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"orr     r0, r0, r2", "instr":"orr     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long_2addr.S b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
deleted file mode 100644
index 9395346..0000000
--- a/runtime/interpreter/mterp/arm/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"orr     r0, r0, r2", "instr":"orr     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_packed_switch.S b/runtime/interpreter/mterp/arm/op_packed_switch.S
deleted file mode 100644
index 412c58f..0000000
--- a/runtime/interpreter/mterp/arm/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH r0, 1                         @ r0<- bbbb (lo)
-    FETCH r1, 2                         @ r1<- BBBB (hi)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
-    GET_VREG r1, r3                     @ r1<- vAA
-    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
-    bl      $func                       @ r0<- code-unit branch offset
-    movs    rINST, r0
-    b       MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/arm/op_rem_double.S b/runtime/interpreter/mterp/arm/op_rem_double.S
deleted file mode 100644
index b539221..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_double.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a double remainder function, but libm does */
-%include "arm/binopWide.S" {"instr":"bl      fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
deleted file mode 100644
index 372ef1d..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a double remainder function, but libm does */
-%include "arm/binopWide2addr.S" {"instr":"bl      fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float.S b/runtime/interpreter/mterp/arm/op_rem_float.S
deleted file mode 100644
index 7bd10de..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_float.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm/binop.S" {"instr":"bl      fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
deleted file mode 100644
index 93c5fae..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm/binop2addr.S" {"instr":"bl      fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_int.S b/runtime/interpreter/mterp/arm/op_rem_int.S
deleted file mode 100644
index ff62573..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int
-     *
-     */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls  r1, r1, r2, r0                 @ r1<- op, r0-r2 changed
-#else
-    bl   __aeabi_idivmod                @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
deleted file mode 100644
index ba5751a..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/2addr
-     *
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
deleted file mode 100644
index 4edb187..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
+++ /dev/null
@@ -1,31 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/lit16
-     *
-     */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl     __aeabi_idivmod              @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
deleted file mode 100644
index 3888361..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/lit8
-     *
-     */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl       __aeabi_idivmod            @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_long.S b/runtime/interpreter/mterp/arm/op_rem_long.S
deleted file mode 100644
index b2b1c24..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_long.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-%include "arm/binopWide.S" {"instr":"bl      __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
deleted file mode 100644
index f87d493..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-%include "arm/binopWide2addr.S" {"instr":"bl      __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_return.S b/runtime/interpreter/mterp/arm/op_return.S
deleted file mode 100644
index f9c0f0f..0000000
--- a/runtime/interpreter/mterp/arm/op_return.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r0, r2                     @ r0<- vAA
-    mov     r1, #0
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_object.S b/runtime/interpreter/mterp/arm/op_return_object.S
deleted file mode 100644
index c490730..0000000
--- a/runtime/interpreter/mterp/arm/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_return.S"
diff --git a/runtime/interpreter/mterp/arm/op_return_void.S b/runtime/interpreter/mterp/arm/op_return_void.S
deleted file mode 100644
index a91ccb3..0000000
--- a/runtime/interpreter/mterp/arm/op_return_void.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov    r0, #0
-    mov    r1, #0
-    b      MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
deleted file mode 100644
index b953f4c..0000000
--- a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov    r0, #0
-    mov    r1, #0
-    b      MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
deleted file mode 100644
index df582c0..0000000
--- a/runtime/interpreter/mterp/arm/op_return_wide.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
-    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int.S b/runtime/interpreter/mterp/arm/op_rsub_int.S
deleted file mode 100644
index 1508dd4..0000000
--- a/runtime/interpreter/mterp/arm/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "arm/binopLit16.S" {"instr":"rsb     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
deleted file mode 100644
index dc953dc..0000000
--- a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"rsb     r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
deleted file mode 100644
index b382de4..0000000
--- a/runtime/interpreter/mterp/arm/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "arm/field.S" { }
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
deleted file mode 100644
index df1a024..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
deleted file mode 100644
index 8ad3ff0..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
deleted file mode 100644
index 5239514..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
deleted file mode 100644
index e61a5a7..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
deleted file mode 100644
index 49493eb..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
deleted file mode 100644
index d6905df..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int.S b/runtime/interpreter/mterp/arm/op_shl_int.S
deleted file mode 100644
index 7e4c768..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
deleted file mode 100644
index 4286577..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
deleted file mode 100644
index 60a1498..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx    r1, r3, #8, #5", "instr":"mov     r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S
deleted file mode 100644
index 82ec6ed..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r1, r1, asl r2              @ r1<- r1 << r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, r0, asl r2              @ r0<- r0 << r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
deleted file mode 100644
index f361a7d..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r1, r1, asl r2              @ r1<- r1 << r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
-    mov     r0, r0, asl r2              @ r0<- r0 << r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_int.S b/runtime/interpreter/mterp/arm/op_shr_int.S
deleted file mode 100644
index 6317605..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
deleted file mode 100644
index cc8632f..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
deleted file mode 100644
index c2f6cb0..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx    r1, r3, #8, #5", "instr":"mov     r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S
deleted file mode 100644
index a0afe5b..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r0<- r0 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r1, r1, asr r2              @ r1<- r1 >> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
deleted file mode 100644
index 976110e..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
-    mov     r1, r1, asr r2              @ r1<- r1 >> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sparse_switch.S b/runtime/interpreter/mterp/arm/op_sparse_switch.S
deleted file mode 100644
index 9f7a42b..0000000
--- a/runtime/interpreter/mterp/arm/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
deleted file mode 100644
index 171f024..0000000
--- a/runtime/interpreter/mterp/arm/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "arm/field.S" { }
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
deleted file mode 100644
index 0c37623..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
deleted file mode 100644
index 8d4e754..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
deleted file mode 100644
index 442b56f..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_object.S b/runtime/interpreter/mterp/arm/op_sput_object.S
deleted file mode 100644
index 8fcd52e..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
deleted file mode 100644
index 0eb533f..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
deleted file mode 100644
index c254f78..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_double.S b/runtime/interpreter/mterp/arm/op_sub_double.S
deleted file mode 100644
index 69bcc67..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fsubd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
deleted file mode 100644
index 2ea59fe..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fsubd   d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float.S b/runtime/interpreter/mterp/arm/op_sub_float.S
deleted file mode 100644
index 3f17a0d..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fsubs   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
deleted file mode 100644
index 2f4aac4..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fsubs   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int.S b/runtime/interpreter/mterp/arm/op_sub_int.S
deleted file mode 100644
index efb9e10..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"sub     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
deleted file mode 100644
index 4d3036b..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"sub     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long.S b/runtime/interpreter/mterp/arm/op_sub_long.S
deleted file mode 100644
index 6f1eb6e..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"subs    r0, r0, r2", "instr":"sbc     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
deleted file mode 100644
index 8e9da05..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"subs    r0, r0, r2", "instr":"sbc     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_throw.S b/runtime/interpreter/mterp/arm/op_throw.S
deleted file mode 100644
index be49ada..0000000
--- a/runtime/interpreter/mterp/arm/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r1, r2                      @ r1<- vAA (exception object)
-    cmp      r1, #0                      @ null object?
-    beq      common_errNullObject        @ yes, throw an NPE instead
-    str      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ thread->exception<- obj
-    b        MterpException
diff --git a/runtime/interpreter/mterp/arm/op_unused_3e.S b/runtime/interpreter/mterp/arm/op_unused_3e.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_3f.S b/runtime/interpreter/mterp/arm/op_unused_3f.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_40.S b/runtime/interpreter/mterp/arm/op_unused_40.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_41.S b/runtime/interpreter/mterp/arm/op_unused_41.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_42.S b/runtime/interpreter/mterp/arm/op_unused_42.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_43.S b/runtime/interpreter/mterp/arm/op_unused_43.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_73.S b/runtime/interpreter/mterp/arm/op_unused_73.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_79.S b/runtime/interpreter/mterp/arm/op_unused_79.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_7a.S b/runtime/interpreter/mterp/arm/op_unused_7a.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f3.S b/runtime/interpreter/mterp/arm/op_unused_f3.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f4.S b/runtime/interpreter/mterp/arm/op_unused_f4.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f5.S b/runtime/interpreter/mterp/arm/op_unused_f5.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f6.S b/runtime/interpreter/mterp/arm/op_unused_f6.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f7.S b/runtime/interpreter/mterp/arm/op_unused_f7.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f8.S b/runtime/interpreter/mterp/arm/op_unused_f8.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f9.S b/runtime/interpreter/mterp/arm/op_unused_f9.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fc.S b/runtime/interpreter/mterp/arm/op_unused_fc.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fd.S b/runtime/interpreter/mterp/arm/op_unused_fd.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int.S b/runtime/interpreter/mterp/arm/op_ushr_int.S
deleted file mode 100644
index a74361b..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
deleted file mode 100644
index f2d1d13..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
deleted file mode 100644
index 5554eb0..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx    r1, r3, #8, #5", "instr":"mov     r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S
deleted file mode 100644
index c817bc9..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r0<- r0 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
deleted file mode 100644
index 2735f87..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
-    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_xor_int.S b/runtime/interpreter/mterp/arm/op_xor_int.S
deleted file mode 100644
index fd7a4b7..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"eor     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
deleted file mode 100644
index 196a665..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"eor     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
deleted file mode 100644
index 39f2a47..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"eor     r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
deleted file mode 100644
index 97d0b9e..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"eor     r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long.S b/runtime/interpreter/mterp/arm/op_xor_long.S
deleted file mode 100644
index 4f830d0..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"eor     r0, r0, r2", "instr":"eor     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
deleted file mode 100644
index 5b5ed88..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"eor     r0, r0, r2", "instr":"eor     r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
new file mode 100644
index 0000000..31b9354
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/other.S
@@ -0,0 +1,385 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@@BBBB */
+    .extern $helper
+    EXPORT_PC
+    FETCH   r0, 1                       @ r0<- BBBB
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    add     r2, rFP, #OFF_FP_SHADOWFRAME
+    mov     r3, rSELF
+    bl      $helper                     @ (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     @ load rINST
+    cmp     r0, #0                      @ fail?
+    bne     MterpPossibleException      @ let reference interpreter deal with it.
+    ADVANCE 2                           @ advance rPC
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r1, 2                         @ r1<- BBBB (high)
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG r0, r3                     @ vAA<- r0
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    SET_VREG r0, r3                     @ vAA<- r0
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
+    ubfx    r0, rINST, #8, #4           @ r0<- A
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+    SET_VREG r1, r0                     @ fp[A]<- r1
+    GOTO_OPCODE ip                      @ execute next instruction
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, r0, lsl #16             @ r0<- BBBB0000
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    SET_VREG r0, r3                     @ vAA<- r0
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, String@BBBBBBBB */
+    EXPORT_PC
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r2, 2                         @ r2<- BBBB (high)
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    orr     r0, r0, r2, lsl #16         @ r1<- BBBBbbbb
+    add     r2, rFP, #OFF_FP_SHADOWFRAME
+    mov     r3, rSELF
+    bl      MterpConstString            @ (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     @ advance rPC
+    cmp     r0, #0                      @ fail?
+    bne     MterpPossibleException      @ let reference interpreter deal with it.
+    ADVANCE 3                           @ advance rPC
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r1, 2                         @ r1<- BBBB (low middle)
+    FETCH r2, 3                         @ r2<- hhhh (high middle)
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
+    FETCH r3, 4                         @ r3<- HHHH (high)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
+    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
+    FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
+    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r9    @ vAA<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    FETCH r0, 1                         @ r0<- 0000bbbb (low)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    FETCH r1, 1                         @ r1<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, #0                      @ r0<- 00000000
+    mov     r1, r1, lsl #16             @ r1<- BBBB0000
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_monitor_enter():
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    mov      r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG r0, r2                      @ r0<- vAA (object)
+    mov      r1, rSELF                   @ r1<- self
+    bl       artLockObjectFromCode
+    cmp      r0, #0
+    bne      MterpException
+    FETCH_ADVANCE_INST 1
+    ldr      r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+    cmp      r0, #0
+    beq      MterpFallback
+    GET_INST_OPCODE ip                   @ extract opcode from rINST
+    GOTO_OPCODE ip                       @ jump to next instruction
+
+%def op_monitor_exit():
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    mov      r2, rINST, lsr #8          @ r2<- AA
+    GET_VREG r0, r2                     @ r0<- vAA (object)
+    mov      r1, rSELF                  @ r0<- self
+    bl       artUnlockObjectFromCode    @ r0<- success for unlock(self, obj)
+    cmp     r0, #0                      @ failed?
+    bne     MterpException
+    FETCH_ADVANCE_INST 1                @ before throw: advance rPC, load rINST
+    ldr      r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+    cmp      r0, #0
+    beq      MterpFallback
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    GET_VREG r2, r1                     @ r2<- fp[B]
+    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
+    .else
+    SET_VREG r2, r0                     @ fp[A]<- r2
+    .endif
+    GOTO_OPCODE ip                      @ execute next instruction
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH r1, 2                         @ r1<- BBBB
+    FETCH r0, 1                         @ r0<- AAAA
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    GET_VREG r2, r1                     @ r2<- fp[BBBB]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT r2, r0              @ fp[AAAA]<- r2
+    .else
+    SET_VREG r2, r0                     @ fp[AAAA]<- r2
+    .endif
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_exception():
+    /* move-exception vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+    mov     r1, #0                      @ r1<- 0
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    SET_VREG_OBJECT r3, r2              @ fp[AA]<- exception obj
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    str     r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ clear exception
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH r1, 1                         @ r1<- BBBB
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_VREG r2, r1                     @ r2<- fp[BBBB]
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT r2, r0              @ fp[AA]<- r2
+    .else
+    SET_VREG r2, r0                     @ fp[AA]<- r2
+    .endif
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JType.
+    ldr     r0, [r0]                    @ r0 <- result.i.
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT r0, r2, r1          @ fp[AA]<- r0
+    .else
+    SET_VREG r0, r2                     @ fp[AA]<- r0
+    .endif
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* move-result-wide vAA */
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
+    ldr     r3, [rFP, #OFF_FP_RESULT_REGISTER]
+    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[AA]<- r0/r1
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[A]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[B]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
+    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[A]<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH r3, 2                         @ r3<- BBBB
+    FETCH r2, 1                         @ r2<- AAAA
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
+    VREG_INDEX_TO_ADDR lr, r2           @ r2<- &fp[AAAA]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[BBBB]
+    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
+    SET_VREG_WIDE_BY_ADDR r0, r1, lr    @ fp[AAAA]<- r0/r1
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH r3, 1                         @ r3<- BBBB
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
+    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
+    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
+    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[BBBB]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
+    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[AA]<- r0/r1
+    GOTO_OPCODE ip                      @ jump to next instruction
+
+%def op_nop():
+    FETCH_ADVANCE_INST 1                @ advance to next instr, load rINST
+    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+    GOTO_OPCODE ip                      @ execute it
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_73():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/arm/unop.S b/runtime/interpreter/mterp/arm/unop.S
deleted file mode 100644
index 56518b5..0000000
--- a/runtime/interpreter/mterp/arm/unop.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-    $preinstr                           @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $instr                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S
deleted file mode 100644
index 2d0453a..0000000
--- a/runtime/interpreter/mterp/arm/unopNarrower.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op r0/r1", where
-     * "result" is a 32-bit quantity in r0.
-     *
-     * For: long-to-float, double-to-int, double-to-float
-     *
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for op_move.)
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
deleted file mode 100644
index cd5defd..0000000
--- a/runtime/interpreter/mterp/arm/unopWide.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0/r1".
-     * This could be an ARM instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $preinstr                           @ optional op; may set condition codes
-    $instr                              @ r0/r1<- op, r2-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
deleted file mode 100644
index 9d50489..0000000
--- a/runtime/interpreter/mterp/arm/unopWider.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op r0", where
-     * "result" is a 64-bit quantity in r0/r1.
-     *
-     * For: int-to-long, int-to-double, float-to-long, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    GET_VREG r0, r3                     @ r0<- vB
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    $preinstr                           @ optional op; may set condition codes
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    $instr                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unused.S b/runtime/interpreter/mterp/arm/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/arm/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
diff --git a/runtime/interpreter/mterp/arm/zcmp.S b/runtime/interpreter/mterp/arm/zcmp.S
deleted file mode 100644
index 5db8b6c..0000000
--- a/runtime/interpreter/mterp/arm/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    b${condition} MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/alt_stub.S b/runtime/interpreter/mterp/arm64/alt_stub.S
deleted file mode 100644
index 3a463fe..0000000
--- a/runtime/interpreter/mterp/arm64/alt_stub.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (${opnum} * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
diff --git a/runtime/interpreter/mterp/arm64/arithmetic.S b/runtime/interpreter/mterp/arm64/arithmetic.S
new file mode 100644
index 0000000..cf9dd86
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/arithmetic.S
@@ -0,0 +1,507 @@
+%def binop(preinstr="", result="w0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = w0 op w1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than w0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w9, wINST, #8               // w9<- AA
+    lsr     w3, w0, #8                  // w3<- CC
+    and     w2, w0, #255                // w2<- BB
+    GET_VREG w1, w3                     // w1<- vCC
+    GET_VREG w0, w2                     // w0<- vBB
+    .if $chkzero
+    cbz     w1, common_errDivideByZero  // is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $preinstr                           // optional op; may set condition codes
+    $instr                              // $result<- op, w0-w3 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG $result, w9                // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 11-14 instructions */
+
+%def binop2addr(preinstr="", result="w0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = w0 op w1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than w0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    GET_VREG w1, w3                     // w1<- vB
+    GET_VREG w0, w9                     // w0<- vA
+    .if $chkzero
+    cbz     w1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    $preinstr                           // optional op; may set condition codes
+    $instr                              // $result<- op, w0-w3 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG $result, w9                // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-13 instructions */
+
+%def binopLit16(preinstr="", result="w0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = w0 op w1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than w0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
+    lsr     w2, wINST, #12              // w2<- B
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    GET_VREG w0, w2                     // w0<- vB
+    .if $chkzero
+    cbz     w1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $preinstr
+    $instr                              // $result<- op, w0-w3 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG $result, w9                // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-13 instructions */
+
+%def binopLit8(extract="asr     w1, w3, #8", preinstr="", result="w0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = w0 op w1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than w0, you can override "result".)
+     *
+     * You can override "extract" if the extraction of the literal value
+     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+     * can be omitted completely if the shift is embedded in "instr".
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
+    lsr     w9, wINST, #8               // w9<- AA
+    and     w2, w3, #255                // w2<- BB
+    GET_VREG w0, w2                     // w0<- vBB
+    $extract                            // optional; typically w1<- ssssssCC (sign extended)
+    .if $chkzero
+    cbz     w1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $preinstr                           // optional op; may set condition codes
+    $instr                              // $result<- op, w0-w3 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG $result, w9                // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-12 instructions */
+
+%def binopWide(preinstr="", instr="add x0, x1, x2", result="x0", r1="x1", r2="x2", chkzero="0"):
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = x1 op x2".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than x0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    lsr     w2, w0, #8                  // w2<- CC
+    and     w1, w0, #255                // w1<- BB
+    GET_VREG_WIDE $r2, w2               // $r2<- vCC
+    GET_VREG_WIDE $r1, w1               // $r1<- vBB
+    .if $chkzero
+    cbz     $r2, common_errDivideByZero  // is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $preinstr
+    $instr                              // $result<- op, w0-w4 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE $result, w4           // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 11-14 instructions */
+
+%def binopWide2addr(preinstr="", instr="add x0, x0, x1", r0="x0", r1="x1", chkzero="0"):
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "x0 = x0 op x1".
+     * This must not be a function call, as we keep w2 live across it.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (w1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG_WIDE $r1, w1               // x1<- vB
+    GET_VREG_WIDE $r0, w2               // x0<- vA
+    .if $chkzero
+    cbz     $r1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    $preinstr
+    $instr                              // result<- op
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE $r0, w2               // vAA<- result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-13 instructions */
+
+%def shiftWide(opcode="shl"):
+    /*
+     * 64-bit shift operation.
+     *
+     * For: shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr      w3, wINST, #8               // w3<- AA
+    lsr      w2, w0, #8                  // w2<- CC
+    GET_VREG w2, w2                     // w2<- vCC (shift count)
+    and      w1, w0, #255                // w1<- BB
+    GET_VREG_WIDE x1, w1                // x1<- vBB
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $opcode  x0, x1, x2                 // Do the shift. Only low 6 bits of x2 are used.
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE x0, w3                // vAA<- x0
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 11-14 instructions */
+
+%def shiftWide2addr(opcode="lsl"):
+    /*
+     * Generic 64-bit shift operation.
+     */
+    /* binop/2addr vA, vB */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG w1, w1                     // w1<- vB (shift count)
+    GET_VREG_WIDE x0, w2                // x0<- vA
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    $opcode x0, x0, x1                  // Do the shift. Only low 6 bits of x1 are used.
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE x0, w2               // vAA<- result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-13 instructions */
+
+%def unop(instr=""):
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op w0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    GET_VREG w0, w3                     // w0<- vB
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    $instr                              // w0<- op, w0-w3 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG w0, w9                     // vAA<- w0
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 8-9 instructions */
+
+%def unopWide(instr="sub x0, xzr, x0"):
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op x0".
+     *
+     * For: neg-long, not-long
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+    GET_VREG_WIDE x0, w3
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    $instr
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE x0, w4
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-11 instructions */
+
+%def op_add_int():
+%  binop(instr="add     w0, w0, w1")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="add     w0, w0, w1")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="add     w0, w0, w1")
+
+%def op_add_int_lit8():
+%  binopLit8(extract="", instr="add     w0, w0, w3, asr #8")
+
+%def op_add_long():
+%  binopWide(instr="add x0, x1, x2")
+
+%def op_add_long_2addr():
+%  binopWide2addr(instr="add     x0, x0, x1")
+
+%def op_and_int():
+%  binop(instr="and     w0, w0, w1")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="and     w0, w0, w1")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="and     w0, w0, w1")
+
+%def op_and_int_lit8():
+%  binopLit8(extract="", instr="and     w0, w0, w3, asr #8")
+
+%def op_and_long():
+%  binopWide(instr="and x0, x1, x2")
+
+%def op_and_long_2addr():
+%  binopWide2addr(instr="and     x0, x0, x1")
+
+%def op_cmp_long():
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    and     w2, w0, #255                // w2<- BB
+    lsr     w3, w0, #8                  // w3<- CC
+    GET_VREG_WIDE x1, w2
+    GET_VREG_WIDE x2, w3
+    cmp     x1, x2
+    cset    w0, ne
+    cneg    w0, w0, lt
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    SET_VREG w0, w4
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_div_int():
+%  binop(instr="sdiv     w0, w0, w1", chkzero="1")
+
+%def op_div_int_2addr():
+%  binop2addr(instr="sdiv     w0, w0, w1", chkzero="1")
+
+%def op_div_int_lit16():
+%  binopLit16(instr="sdiv w0, w0, w1", chkzero="1")
+
+%def op_div_int_lit8():
+%  binopLit8(instr="sdiv     w0, w0, w1", chkzero="1")
+
+%def op_div_long():
+%  binopWide(instr="sdiv x0, x1, x2", chkzero="1")
+
+%def op_div_long_2addr():
+%  binopWide2addr(instr="sdiv     x0, x0, x1", chkzero="1")
+
+%def op_int_to_byte():
+%  unop(instr="sxtb    w0, w0")
+
+%def op_int_to_char():
+%  unop(instr="uxth    w0, w0")
+
+%def op_int_to_long():
+    /* int-to-long vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+    GET_VREG_S x0, w3                   // x0<- sign_extend(fp[B])
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE x0, w4                // fp[A]<- x0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_int_to_short():
+%  unop(instr="sxth    w0, w0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%  binop(instr="mul     w0, w1, w0")
+
+%def op_mul_int_2addr():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%  binop2addr(instr="mul     w0, w1, w0")
+
+%def op_mul_int_lit16():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%  binopLit16(instr="mul     w0, w1, w0")
+
+%def op_mul_int_lit8():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%  binopLit8(instr="mul     w0, w1, w0")
+
+%def op_mul_long():
+%  binopWide(instr="mul x0, x1, x2")
+
+%def op_mul_long_2addr():
+%  binopWide2addr(instr="mul     x0, x0, x1")
+
+%def op_neg_int():
+%  unop(instr="sub     w0, wzr, w0")
+
+%def op_neg_long():
+%  unopWide(instr="sub x0, xzr, x0")
+
+%def op_not_int():
+%  unop(instr="mvn     w0, w0")
+
+%def op_not_long():
+%  unopWide(instr="mvn     x0, x0")
+
+%def op_or_int():
+%  binop(instr="orr     w0, w0, w1")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="orr     w0, w0, w1")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="orr     w0, w0, w1")
+
+%def op_or_int_lit8():
+%  binopLit8(extract="", instr="orr     w0, w0, w3, asr #8")
+
+%def op_or_long():
+%  binopWide(instr="orr x0, x1, x2")
+
+%def op_or_long_2addr():
+%  binopWide2addr(instr="orr     x0, x0, x1")
+
+%def op_rem_int():
+%  binop(preinstr="sdiv     w2, w0, w1", instr="msub w0, w2, w1, w0", chkzero="1")
+
+%def op_rem_int_2addr():
+%  binop2addr(preinstr="sdiv     w2, w0, w1", instr="msub w0, w2, w1, w0", chkzero="1")
+
+%def op_rem_int_lit16():
+%  binopLit16(preinstr="sdiv w3, w0, w1", instr="msub w0, w3, w1, w0", chkzero="1")
+
+%def op_rem_int_lit8():
+%  binopLit8(preinstr="sdiv w3, w0, w1", instr="msub w0, w3, w1, w0", chkzero="1")
+
+%def op_rem_long():
+%  binopWide(preinstr="sdiv x3, x1, x2", instr="msub x0, x3, x2, x1", chkzero="1")
+
+%def op_rem_long_2addr():
+%  binopWide2addr(preinstr="sdiv x3, x0, x1", instr="msub x0, x3, x1, x0", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%  binopLit16(instr="sub     w0, w1, w0")
+
+%def op_rsub_int_lit8():
+%  binopLit8(instr="sub     w0, w1, w0")
+
+%def op_shl_int():
+%  binop(instr="lsl     w0, w0, w1")
+
+%def op_shl_int_2addr():
+%  binop2addr(instr="lsl     w0, w0, w1")
+
+%def op_shl_int_lit8():
+%  binopLit8(extract="ubfx    w1, w3, #8, #5", instr="lsl     w0, w0, w1")
+
+%def op_shl_long():
+%  shiftWide(opcode="lsl")
+
+%def op_shl_long_2addr():
+%  shiftWide2addr(opcode="lsl")
+
+%def op_shr_int():
+%  binop(instr="asr     w0, w0, w1")
+
+%def op_shr_int_2addr():
+%  binop2addr(instr="asr     w0, w0, w1")
+
+%def op_shr_int_lit8():
+%  binopLit8(extract="ubfx    w1, w3, #8, #5", instr="asr     w0, w0, w1")
+
+%def op_shr_long():
+%  shiftWide(opcode="asr")
+
+%def op_shr_long_2addr():
+%  shiftWide2addr(opcode="asr")
+
+%def op_sub_int():
+%  binop(instr="sub     w0, w0, w1")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="sub     w0, w0, w1")
+
+%def op_sub_long():
+%  binopWide(instr="sub x0, x1, x2")
+
+%def op_sub_long_2addr():
+%  binopWide2addr(instr="sub     x0, x0, x1")
+
+%def op_ushr_int():
+%  binop(instr="lsr     w0, w0, w1")
+
+%def op_ushr_int_2addr():
+%  binop2addr(instr="lsr     w0, w0, w1")
+
+%def op_ushr_int_lit8():
+%  binopLit8(extract="ubfx    w1, w3, #8, #5", instr="lsr     w0, w0, w1")
+
+%def op_ushr_long():
+%  shiftWide(opcode="lsr")
+
+%def op_ushr_long_2addr():
+%  shiftWide2addr(opcode="lsr")
+
+%def op_xor_int():
+%  binop(instr="eor     w0, w0, w1")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="eor     w0, w0, w1")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="eor     w0, w0, w1")
+
+%def op_xor_int_lit8():
+%  binopLit8(extract="", instr="eor     w0, w0, w3, asr #8")
+
+%def op_xor_long():
+%  binopWide(instr="eor x0, x1, x2")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(instr="eor     x0, x0, x1")
diff --git a/runtime/interpreter/mterp/arm64/array.S b/runtime/interpreter/mterp/arm64/array.S
new file mode 100644
index 0000000..a023d22
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/array.S
@@ -0,0 +1,235 @@
+%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B w2, 1, 0                    // w2<- BB
+    lsr     w9, wINST, #8               // w9<- AA
+    FETCH_B w3, 1, 1                    // w3<- CC
+    GET_VREG w0, w2                     // w0<- vBB (array object)
+    GET_VREG w1, w3                     // w1<- vCC (requested index)
+    cbz     x0, common_errNullObject    // bail if null array object.
+    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
+    add     x0, x0, w1, uxtw #$shift    // x0<- arrayObj + index*width
+    cmp     w1, w3                      // compare unsigned index, length
+    bcs     common_errArrayIndex        // index >= length, bail
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $load   w2, [x0, #$data_offset]     // w2<- vBB[vCC]
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG w2, w9                     // vAA<- w2
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_aget_boolean():
+%  op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B w2, 1, 0                    // w2<- BB
+    FETCH_B w3, 1, 1                    // w3<- CC
+    EXPORT_PC
+    GET_VREG w0, w2                     // w0<- vBB (array object)
+    GET_VREG w1, w3                     // w1<- vCC (requested index)
+    bl       artAGetObjectFromMterp     // (array, index)
+    ldr      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    lsr      w2, wINST, #8               // w2<- AA
+    PREFETCH_INST 2
+    cbnz     w1, MterpException
+    SET_VREG_OBJECT w0, w2
+    ADVANCE 2
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_aget_short():
+%  op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    and     w2, w0, #255                // w2<- BB
+    lsr     w3, w0, #8                  // w3<- CC
+    GET_VREG w0, w2                     // w0<- vBB (array object)
+    GET_VREG w1, w3                     // w1<- vCC (requested index)
+    cbz     w0, common_errNullObject    // bail if null array object
+    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
+    add     x0, x0, w1, lsl #3          // x0<- arrayObj + index*width
+    cmp     w1, w3                      // compare unsigned index, length
+    bcs     common_errArrayIndex        // index >= length, bail
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    ldr     x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  // x2<- vBB[vCC]
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE x2, w4
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B w2, 1, 0                    // w2<- BB
+    lsr     w9, wINST, #8               // w9<- AA
+    FETCH_B w3, 1, 1                    // w3<- CC
+    GET_VREG w0, w2                     // w0<- vBB (array object)
+    GET_VREG w1, w3                     // w1<- vCC (requested index)
+    cbz     w0, common_errNullObject    // bail if null
+    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
+    add     x0, x0, w1, lsl #$shift     // x0<- arrayObj + index*width
+    cmp     w1, w3                      // compare unsigned index, length
+    bcs     common_errArrayIndex        // index >= length, bail
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_VREG w2, w9                     // w2<- vAA
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    $store  w2, [x0, #$data_offset]     // vBB[vCC]<- w2
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_aput_boolean():
+%  op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    add     x0, xFP, #OFF_FP_SHADOWFRAME
+    mov     x1, xPC
+    mov     w2, wINST
+    bl      MterpAputObject
+    cbz     w0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_aput_short():
+%  op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    and     w2, w0, #255                // w2<- BB
+    lsr     w3, w0, #8                  // w3<- CC
+    GET_VREG w0, w2                     // w0<- vBB (array object)
+    GET_VREG w1, w3                     // w1<- vCC (requested index)
+    cbz     w0, common_errNullObject    // bail if null
+    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
+    add     x0, x0, w1, lsl #3          // x0<- arrayObj + index*width
+    cmp     w1, w3                      // compare unsigned index, length
+    bcs     common_errArrayIndex        // index >= length, bail
+    GET_VREG_WIDE x1, w4
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    str     x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_array_length():
+    /*
+     * Return the length of an array.
+     */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG w0, w1                     // w0<- vB (object ref)
+    cbz     w0, common_errNullObject    // bail if null object
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- array length
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG w3, w2                     // vB<- length
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
+    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
+    lsr     w3, wINST, #8               // w3<- AA
+    orr     x1, x0, x1, lsl #16         // x1<- ssssssssBBBBbbbb
+    GET_VREG w0, w3                     // w0<- vAA (array object)
+    add     x1, xPC, x1, lsl #1         // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
+    bl      MterpFillArrayData          // (obj, payload)
+    cbz     w0, MterpPossibleException      // exception?
+    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+    .extern $helper
+    EXPORT_PC
+    add     x0, xFP, #OFF_FP_SHADOWFRAME
+    mov     x1, xPC
+    mov     x2, xSELF
+    bl      $helper
+    cbz     w0, MterpPossibleException
+    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class//CCCC */
+    EXPORT_PC
+    add     x0, xFP, #OFF_FP_SHADOWFRAME
+    mov     x1, xPC
+    mov     w2, wINST
+    mov     x3, xSELF
+    bl      MterpNewArray
+    cbz     w0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/bincmp.S b/runtime/interpreter/mterp/arm64/bincmp.S
deleted file mode 100644
index 8dd4fed..0000000
--- a/runtime/interpreter/mterp/arm64/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.${condition} MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/binop.S b/runtime/interpreter/mterp/arm64/binop.S
deleted file mode 100644
index b629b0b..0000000
--- a/runtime/interpreter/mterp/arm64/binop.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if $chkzero
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $preinstr                           // optional op; may set condition codes
-    $instr                              // $result<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG $result, w9                // vAA<- $result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binop2addr.S b/runtime/interpreter/mterp/arm64/binop2addr.S
deleted file mode 100644
index a480a7d..0000000
--- a/runtime/interpreter/mterp/arm64/binop2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if $chkzero
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    $preinstr                           // optional op; may set condition codes
-    $instr                              // $result<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG $result, w9                // vAA<- $result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit16.S b/runtime/interpreter/mterp/arm64/binopLit16.S
deleted file mode 100644
index 4f9d205..0000000
--- a/runtime/interpreter/mterp/arm64/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if $chkzero
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $preinstr
-    $instr                              // $result<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG $result, w9                // vAA<- $result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit8.S b/runtime/interpreter/mterp/arm64/binopLit8.S
deleted file mode 100644
index dfa3169..0000000
--- a/runtime/interpreter/mterp/arm64/binopLit8.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"extract": "asr     w1, w3, #8", "preinstr":"", "result":"w0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    $extract                            // optional; typically w1<- ssssssCC (sign extended)
-    .if $chkzero
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $preinstr                           // optional op; may set condition codes
-    $instr                              // $result<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG $result, w9                // vAA<- $result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide.S b/runtime/interpreter/mterp/arm64/binopWide.S
deleted file mode 100644
index 9de24f1..0000000
--- a/runtime/interpreter/mterp/arm64/binopWide.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "instr":"add x0, x1, x2", "result":"x0", "r1":"x1", "r2":"x2", "chkzero":"0"}
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE $r2, w2               // w2<- vCC
-    GET_VREG_WIDE $r1, w1               // w1<- vBB
-    .if $chkzero
-    cbz     $r2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $preinstr
-    $instr                              // $result<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE $result, w4           // vAA<- $result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide2addr.S b/runtime/interpreter/mterp/arm64/binopWide2addr.S
deleted file mode 100644
index d9927a2..0000000
--- a/runtime/interpreter/mterp/arm64/binopWide2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"preinstr":"", "instr":"add x0, x0, x1", "r0":"x0", "r1":"x1", "chkzero":"0"}
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE $r1, w1               // x1<- vB
-    GET_VREG_WIDE $r0, w2               // x0<- vA
-    .if $chkzero
-    cbz     $r1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    $preinstr
-    $instr                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE $r0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/close_cfi.S b/runtime/interpreter/mterp/arm64/close_cfi.S
deleted file mode 100644
index 7ba04860..0000000
--- a/runtime/interpreter/mterp/arm64/close_cfi.S
+++ /dev/null
@@ -1,4 +0,0 @@
-// Close out the cfi info.  We're treating mterp as a single function.
-
-END ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/arm64/const.S b/runtime/interpreter/mterp/arm64/const.S
deleted file mode 100644
index 6f82bbf..0000000
--- a/runtime/interpreter/mterp/arm64/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- BBBB
-    lsr     w1, wINST, #8               // w1<- AA
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      $helper                     // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     // load rINST
-    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/control_flow.S b/runtime/interpreter/mterp/arm64/control_flow.S
new file mode 100644
index 0000000..b634c98
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/control_flow.S
@@ -0,0 +1,223 @@
+%def bincmp(condition=""):
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w0, wINST, #8, #4           // w0<- A
+    GET_VREG w3, w1                     // w3<- vB
+    GET_VREG w2, w0                     // w2<- vA
+    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
+    cmp     w2, w3                      // compare (vA, vB)
+    b.${condition} MterpCommonTakenBranchNoFlags
+    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
+    b.eq    .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def zcmp(compare="1", branch=""):
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S wINST, 1                    // w1<- branch offset, in code units
+    .if ${compare}
+    cmp     w2, #0                      // compare (vA, 0)
+    .endif
+    ${branch} MterpCommonTakenBranchNoFlags
+    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
+    b.eq    .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_goto():
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset (the common handler does the doubling).
+     */
+    /* goto +AA */
+    sbfx    wINST, wINST, #8, #8           // wINST<- ssssssAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset (the common handler does the doubling).
+     */
+    /* goto/16 +AAAA */
+    FETCH_S wINST, 1                    // wINST<- ssssAAAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * a "backward branch" test must be "<=0" instead of "<0"; the full
+     * 32-bit offset is assembled into wINST and the common taken-branch
+     * handler performs that check.
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH w0, 1                         // w0<- aaaa (lo)
+    FETCH w1, 2                         // w1<- AAAA (hi)
+    orr     wINST, w0, w1, lsl #16      // wINST<- AAAAaaaa
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+%  bincmp(condition="eq")  # branch if vA == vB
+
+%def op_if_eqz():
+%  zcmp(compare="0", branch="cbz     w2,")  # compare folded into cbz
+
+%def op_if_ge():
+%  bincmp(condition="ge")  # branch if vA >= vB (signed)
+
+%def op_if_gez():
+%  zcmp(compare="0", branch="tbz     w2, #31,")  # sign bit clear => >= 0
+
+%def op_if_gt():
+%  bincmp(condition="gt")  # branch if vA > vB (signed)
+
+%def op_if_gtz():
+%  zcmp(branch="b.gt")  # needs the explicit cmp (default compare="1")
+
+%def op_if_le():
+%  bincmp(condition="le")  # branch if vA <= vB (signed)
+
+%def op_if_lez():
+%  zcmp(branch="b.le")  # needs the explicit cmp (default compare="1")
+
+%def op_if_lt():
+%  bincmp(condition="lt")  # branch if vA < vB (signed)
+
+%def op_if_ltz():
+%  zcmp(compare="0", branch="tbnz    w2, #31,")  # sign bit set => < 0
+
+%def op_if_ne():
+%  bincmp(condition="ne")  # branch if vA != vB
+
+%def op_if_nez():
+%  zcmp(compare="0", branch="cbnz    w2,")  # compare folded into cbnz
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function ($func).
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
+    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi, sign-extended)
+    lsr     w3, wINST, #8               // w3<- AA
+    orr     x0, x0, x1, lsl #16         // x0<- ssssssssBBBBbbbb (switch-table offset, code units)
+    GET_VREG w1, w3                     // w1<- vAA (the value being switched on)
+    add     x0, xPC, x0, lsl #1         // x0<- PC + ssssssssBBBBbbbb*2 (table address)
+    bl      $func                       // w0<- code-unit branch offset
+    sxtw    xINST, w0                   // sign-extend offset into xINST for common handler
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_return():
+    /*
+     * Return a 32-bit value.
+     *
+     * Runs a constructor memory fence first, then checks for a pending
+     * suspend/checkpoint request before returning.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
+    mov     x0, xSELF
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    .L${opcode}_check           // suspend requested: check before returning
+.L${opcode}_return:
+    lsr     w2, wINST, #8               // w2<- AA
+    GET_VREG w0, w2                     // w0<- vAA (the return value)
+    b       MterpReturn
+.L${opcode}_check:
+    bl      MterpSuspendCheck           // (self)
+    b       .L${opcode}_return
+
+%def op_return_object():
+%  op_return()  # identical handling; reference fits in the 32-bit vreg
+
+%def op_return_void():
+    /* Return with no value; still runs the constructor fence + suspend check. */
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
+    mov     x0, xSELF
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    .L${opcode}_check           // suspend requested: check before returning
+.L${opcode}_return:
+    mov     x0, #0                      // result value is 0/unused
+    b       MterpReturn
+.L${opcode}_check:
+    bl      MterpSuspendCheck           // (self)
+    b       .L${opcode}_return
+
+%def op_return_void_no_barrier():
+    /* Like return-void but without the constructor memory fence. */
+    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
+    mov     x0, xSELF
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    .L${opcode}_check           // suspend requested: check before returning
+.L${opcode}_return:
+    mov     x0, #0                      // result value is 0/unused
+    b       MterpReturn
+.L${opcode}_check:
+    bl      MterpSuspendCheck           // (self)
+    b       .L${opcode}_return
+
+%def op_return_wide():
+    /*
+     * Return a 64-bit value (long/double): constructor fence, suspend
+     * check, then the wide vreg pair is loaded into x0 for MterpReturn.
+     */
+    /* return-wide vAA */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    bl      MterpThreadFenceForConstructor
+    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
+    mov     x0, xSELF
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    .L${opcode}_check           // suspend requested: check before returning
+.L${opcode}_return:
+    lsr     w2, wINST, #8               // w2<- AA
+    GET_VREG_WIDE x0, w2                // x0<- vAA (64-bit return value)
+    b       MterpReturn
+.L${opcode}_check:
+    bl      MterpSuspendCheck           // (self)
+    b       .L${opcode}_return
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")  # same decode, sparse-table helper
+
+%def op_throw():
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC
+    lsr      w2, wINST, #8               // w2<- AA
+    GET_VREG w1, w2                      // w1<- vAA (exception object)
+    cbz      w1, common_errNullObject    // throwing null => NullPointerException
+    str      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // thread->exception<- obj (w1 write zero-extended x1)
+    b        MterpException
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
deleted file mode 100644
index cf38a29..0000000
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-    .text
-
-/*
- * Interpreter entry point.
- * On entry:
- *  x0  Thread* self/
- *  x1  insns_
- *  x2  ShadowFrame
- *  x3  JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
-    SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
-    SAVE_TWO_REGS                xIBASE, xREFS, 16
-    SAVE_TWO_REGS                xSELF, xINST, 32
-    SAVE_TWO_REGS                xPC, xFP, 48
-    SAVE_TWO_REGS                fp, lr, 64
-    add     fp, sp, #64
-
-    /* Remember the return register */
-    str     x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
-    /* Remember the dex instruction pointer */
-    str     x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
-    /* set up "named" registers */
-    mov     xSELF, x0
-    ldr     w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
-    add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET     // point to vregs.
-    add     xREFS, xFP, w0, lsl #2                 // point to reference array in shadow frame
-    ldr     w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET]   // Get starting dex_pc.
-    add     xPC, x1, w0, lsl #1                    // Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
-    /* Set up for backwards branches & osr profiling */
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xSELF
-    bl      MterpSetUpHotnessCountdown
-    mov     wPROFILE, w0                // Starting hotness countdown to xPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST                          // load wINST from rPC
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm64/fallback.S b/runtime/interpreter/mterp/arm64/fallback.S
deleted file mode 100644
index 44e7e12..0000000
--- a/runtime/interpreter/mterp/arm64/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    b    MterpFallback
-
diff --git a/runtime/interpreter/mterp/arm64/fbinop.S b/runtime/interpreter/mterp/arm64/fbinop.S
deleted file mode 100644
index 926d078..0000000
--- a/runtime/interpreter/mterp/arm64/fbinop.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {}
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    $instr                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fbinop2addr.S b/runtime/interpreter/mterp/arm64/fbinop2addr.S
deleted file mode 100644
index 04236ad..0000000
--- a/runtime/interpreter/mterp/arm64/fbinop2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    $instr                              // s2<- op
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s2, w9
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fcmp.S b/runtime/interpreter/mterp/arm64/fcmp.S
deleted file mode 100644
index cad6318..0000000
--- a/runtime/interpreter/mterp/arm64/fcmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"", "r1":"s1", "r2":"s2", "cond":"lt"}
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG$wide $r1, w2
-    GET_VREG$wide $r2, w3
-    fcmp $r1, $r2
-    cset w0, ne
-    cneg w0, w0, $cond
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w4                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/field.S b/runtime/interpreter/mterp/arm64/field.S
deleted file mode 100644
index 631c8d1..0000000
--- a/runtime/interpreter/mterp/arm64/field.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default { }
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern $helper
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       $helper
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/floating_point.S b/runtime/interpreter/mterp/arm64/floating_point.S
new file mode 100644
index 0000000..ad42db3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/floating_point.S
@@ -0,0 +1,318 @@
+%def fbinop(instr=""):
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float
+     * form: <op> s0, s0, s1
+     */
+    /* floatop vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w1, w0, #8                  // w1<- CC
+    and     w0, w0, #255                // w0<- BB
+    GET_VREG  s1, w1
+    GET_VREG  s0, w0
+    $instr                              // s0<- op
+    lsr     w1, wINST, #8               // w1<- AA
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_FLOAT s0, w1
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2"):
+    /*
+     * Generic 64-bit floating-point operation.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    lsr     w2, w0, #8                  // w2<- CC
+    and     w1, w0, #255                // w1<- BB
+    GET_VREG_DOUBLE $r2, w2             // $r2<- vCC
+    GET_VREG_DOUBLE $r1, w1             // $r1<- vBB
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $instr                              // $result<- op, w0-w4 changed
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_DOUBLE $result, w4         // vAA<- $result
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def fbinop2addr(instr=""):
+    /*
+     * Generic 32-bit floating point "/2addr" binary operation.  Provide
+     * an "instr" line that specifies an instruction that performs
+     * "s2 = s0 op s1".
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    GET_VREG s1, w3
+    GET_VREG s0, w9
+    $instr                              // s2<- op
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_FLOAT s2, w9
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1"):
+    /*
+     * Generic 64-bit floating point "/2addr" binary operation.
+     */
+    /* binop/2addr vA, vB */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG_DOUBLE $r1, w1             // $r1<- vB
+    GET_VREG_DOUBLE $r0, w2             // $r0<- vA
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    $instr                              // result<- op
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_DOUBLE $r0, w2             // vAA<- result
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def fcmp(wide="", r1="s1", r2="s2", cond="lt"):
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w4, wINST, #8               // w4<- AA
+    and     w2, w0, #255                // w2<- BB
+    lsr     w3, w0, #8                  // w3<- CC
+%  if r1.startswith("d"):
+    GET_VREG_DOUBLE $r1, w2
+    GET_VREG_DOUBLE $r2, w3
+%  else:
+    GET_VREG $r1, w2
+    GET_VREG $r2, w3
+%  #endif
+    fcmp $r1, $r2
+    cset w0, ne
+    cneg w0, w0, $cond
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG w0, w4                     // vAA<- w0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def funopNarrow(srcreg="s0", tgtreg="d0", instr=""):
+    /*
+     * Generic 32bit-to-32bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+     *
+     * For: int-to-float, float-to-int
+     * TODO: refactor all of the conversions - parameterize width and use same template.
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+    GET_VREG $srcreg, w3
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    $instr                              // d0<- op
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_FLOAT $tgtreg, w4          // vA<- d0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def funopNarrower(srcreg="s0", tgtreg="d0", instr=""):
+    /*
+     * Generic 64bit-to-32bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+     *
+     * For: double-to-float, double-to-int, long-to-float
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+%  if srcreg.startswith("d"):
+    GET_VREG_DOUBLE $srcreg, w3
+%  else:
+    GET_VREG_WIDE $srcreg, w3
+%  #endif
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    $instr                              // d0<- op
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_FLOAT $tgtreg, w4          // vA<- d0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def funopWide(srcreg="s0", tgtreg="d0", instr=""):
+    /*
+     * Generic 64bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+%  if srcreg.startswith("d"):
+    GET_VREG_DOUBLE $srcreg, w3
+%  else:
+    GET_VREG_WIDE $srcreg, w3
+%  #endif
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    $instr                              // d0<- op
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+%  if tgtreg.startswith("d"):
+    SET_VREG_DOUBLE $tgtreg, w4         // vA<- d0
+%  else:
+    SET_VREG_WIDE $tgtreg, w4           // vA<- d0
+%  #endif
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def funopWider(srcreg="s0", tgtreg="d0", instr=""):
+    /*
+     * Generic 32bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+     *
+     * For: int-to-double, float-to-double, float-to-long
+     */
+    /* unop vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w4, wINST, #8, #4           // w4<- A
+    GET_VREG $srcreg, w3
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    $instr                              // d0<- op
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE $tgtreg, w4           // vA<- d0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_add_double():
+%  fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_add_double_2addr():
+%  fbinopWide2addr(instr="fadd     d0, d0, d1", r0="d0", r1="d1")
+
+%def op_add_float():
+%  fbinop(instr="fadd   s0, s0, s1")
+
+%def op_add_float_2addr():
+%  fbinop2addr(instr="fadd   s2, s0, s1")
+
+%def op_cmpg_double():
+%  fcmp(wide="_WIDE", r1="d1", r2="d2", cond="cc")
+
+%def op_cmpg_float():
+%  fcmp(wide="", r1="s1", r2="s2", cond="cc")
+
+%def op_cmpl_double():
+%  fcmp(wide="_WIDE", r1="d1", r2="d2", cond="lt")
+
+%def op_cmpl_float():
+%  fcmp(wide="", r1="s1", r2="s2", cond="lt")
+
+%def op_div_double():
+%  fbinopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_div_double_2addr():
+%  fbinopWide2addr(instr="fdiv     d0, d0, d1", r0="d0", r1="d1")
+
+%def op_div_float():
+%  fbinop(instr="fdiv   s0, s0, s1")
+
+%def op_div_float_2addr():
+%  fbinop2addr(instr="fdiv   s2, s0, s1")
+
+%def op_double_to_float():
+%  funopNarrower(instr="fcvt s0, d0", srcreg="d0", tgtreg="s0")
+
+%def op_double_to_int():
+%  funopNarrower(instr="fcvtzs w0, d0", srcreg="d0", tgtreg="w0")
+
+%def op_double_to_long():
+%  funopWide(instr="fcvtzs x0, d0", srcreg="d0", tgtreg="x0")
+
+%def op_float_to_double():
+%  funopWider(instr="fcvt  d0, s0", srcreg="s0", tgtreg="d0")
+
+%def op_float_to_int():
+%  funopNarrow(instr="fcvtzs w0, s0", srcreg="s0", tgtreg="w0")
+
+%def op_float_to_long():
+%  funopWider(instr="fcvtzs x0, s0", srcreg="s0", tgtreg="x0")
+
+%def op_int_to_double():
+%  funopWider(instr="scvtf d0, w0", srcreg="w0", tgtreg="d0")
+
+%def op_int_to_float():
+%  funopNarrow(instr="scvtf s0, w0", srcreg="w0", tgtreg="s0")
+
+%def op_long_to_double():
+%  funopWide(instr="scvtf d0, x0", srcreg="x0", tgtreg="d0")
+
+%def op_long_to_float():
+%  funopNarrower(instr="scvtf s0, x0", srcreg="x0", tgtreg="s0")
+
+%def op_mul_double():
+%  fbinopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_mul_double_2addr():
+%  fbinopWide2addr(instr="fmul     d0, d0, d1", r0="d0", r1="d1")
+
+%def op_mul_float():
+%  fbinop(instr="fmul   s0, s0, s1")
+
+%def op_mul_float_2addr():
+%  fbinop2addr(instr="fmul   s2, s0, s1")
+
+%def op_neg_double():
+%  unopWide(instr="eor     x0, x0, #0x8000000000000000")
+
+%def op_neg_float():
+%  unop(instr="eor     w0, w0, #0x80000000")
+
+%def op_rem_double():
+    /* rem vAA, vBB, vCC */
+    FETCH w0, 1                         // w0<- CCBB
+    lsr     w2, w0, #8                  // w2<- CC
+    and     w1, w0, #255                // w1<- BB
+    GET_VREG_DOUBLE d1, w2              // d1<- vCC
+    GET_VREG_DOUBLE d0, w1              // d0<- vBB
+    bl  fmod
+    lsr     w4, wINST, #8               // w4<- AA
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE d0, w4                // vAA<- result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 11-14 instructions */
+
+%def op_rem_double_2addr():
+    /* rem vA, vB */
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG_DOUBLE d1, w1              // d1<- vB
+    GET_VREG_DOUBLE d0, w2              // d0<- vA
+    bl fmod
+    ubfx    w2, wINST, #8, #4           // w2<- A (need to reload - killed across call)
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE d0, w2                // vAA<- result
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* 10-13 instructions */
+
+%def op_rem_float():
+/* EABI doesn't define a float remainder function, but libm does */
+%  fbinop(instr="bl      fmodf")
+
+%def op_rem_float_2addr():
+    /* rem vA, vB */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    GET_VREG s1, w3
+    GET_VREG s0, w9
+    bl  fmodf
+    ubfx    w9, wINST, #8, #4           // w9<- A
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_FLOAT s0, w9
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_sub_double():
+%  fbinopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_sub_double_2addr():
+%  fbinopWide2addr(instr="fsub     d0, d0, d1", r0="d0", r1="d1")
+
+%def op_sub_float():
+%  fbinop(instr="fsub   s0, s0, s1")
+
+%def op_sub_float_2addr():
+%  fbinop2addr(instr="fsub   s2, s0, s1")
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
deleted file mode 100644
index 0ce3543..0000000
--- a/runtime/interpreter/mterp/arm64/footer.S
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogDivideByZeroException
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogArrayIndexException
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNegativeArraySizeException
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNoSuchMethodException
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNullObjectException
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogExceptionThrownException
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    ldr  x2, [xSELF, #THREAD_FLAGS_OFFSET]
-    bl MterpLogSuspendFallback
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ldr     x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    cbz     x0, MterpFallback                       // If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    bl      MterpHandleException                    // (self, shadow_frame)
-    cbz     w0, MterpExceptionReturn                // no local catch, back to caller.
-    ldr     x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
-    ldr     w1, [xFP, #OFF_FP_DEX_PC]
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-    add     xPC, x0, x1, lsl #1                     // generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-    /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    wINST          <= signed offset
- *    wPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    cmp     wINST, #0
-    b.gt    .L_forward_branch           // don't add forward branches to hotness
-    tbnz    wPROFILE, #31, .L_no_count_backwards  // go if negative
-    subs    wPROFILE, wPROFILE, #1      // countdown
-    b.eq    .L_add_batch                // counted down to zero - report
-.L_resume_backward_branch:
-    ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
-    add     w2, wINST, wINST            // w2<- byte offset
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    REFRESH_IBASE
-    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L_suspend_request_pending
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    mov     x0, xSELF
-    bl      MterpSuspendCheck           // (self)
-    cbnz    x0, MterpFallback
-    REFRESH_IBASE                       // might have changed during suspend
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_no_count_backwards:
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.ne    .L_resume_backward_branch
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_osr_forward
-.L_resume_forward_branch:
-    add     w2, wINST, wINST            // w2<- byte offset
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_check_osr_forward:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    mov     x2, xSELF
-    bl      MterpAddHotnessBatch        // (method, shadow_frame, self)
-    mov     wPROFILE, w0                // restore new hotness countdown to wPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, #2
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/*
- * Check for suspend check request.  Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    check1
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-check1:
-    EXPORT_PC
-    mov     x0, xSELF
-    bl      MterpSuspendCheck           // (self)
-    cbnz    x0, MterpFallback           // Something in the environment changed, switch interpreters
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    sxtw x2, wINST
-    bl MterpLogOSR
-#endif
-    mov  x0, #1                         // Signal normal return
-    b    MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogFallback
-#endif
-MterpCommonFallback:
-    mov     x0, #0                                  // signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* xFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    mov     x0, #1                                  // signal return to caller.
-    b MterpDone
-MterpReturn:
-    ldr     x2, [xFP, #OFF_FP_RESULT_REGISTER]
-    str     x0, [x2]
-    mov     x0, #1                                  // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter).  wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmp     wPROFILE, #0
-    bgt     MterpProfileActive                      // if > 0, we may have some counts to report.
-    .cfi_remember_state
-    RESTORE_TWO_REGS                fp, lr, 64
-    RESTORE_TWO_REGS                xPC, xFP, 48
-    RESTORE_TWO_REGS                xSELF, xINST, 32
-    RESTORE_TWO_REGS                xIBASE, xREFS, 16
-    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
-    ret
-    .cfi_restore_state                              // Reset unwind info so following code unwinds.
-    .cfi_def_cfa_offset 80                          // workaround for clang bug: 31975598
-
-MterpProfileActive:
-    mov     xINST, x0                               // stash return value
-    /* Report cached hotness counts */
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xSELF
-    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    bl      MterpAddHotnessBatch                    // (method, shadow_frame, self)
-    mov     x0, xINST                               // restore return value
-    RESTORE_TWO_REGS                fp, lr, 64
-    RESTORE_TWO_REGS                xPC, xFP, 48
-    RESTORE_TWO_REGS                xSELF, xINST, 32
-    RESTORE_TWO_REGS                xIBASE, xREFS, 16
-    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
-    ret
-
diff --git a/runtime/interpreter/mterp/arm64/funopNarrow.S b/runtime/interpreter/mterp/arm64/funopNarrow.S
deleted file mode 100644
index aed830b..0000000
--- a/runtime/interpreter/mterp/arm64/funopNarrow.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
-    /*
-     * Generic 32bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
-     *
-     * For: int-to-float, float-to-int
-     * TODO: refactor all of the conversions - parameterize width and use same template.
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG $srcreg, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    $instr                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG $tgtreg, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopNarrower.S b/runtime/interpreter/mterp/arm64/funopNarrower.S
deleted file mode 100644
index 6fddfea..0000000
--- a/runtime/interpreter/mterp/arm64/funopNarrower.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
-    /*
-     * Generic 64bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE $srcreg, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    $instr                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG $tgtreg, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWide.S b/runtime/interpreter/mterp/arm64/funopWide.S
deleted file mode 100644
index 409e26b..0000000
--- a/runtime/interpreter/mterp/arm64/funopWide.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
-    /*
-     * Generic 64bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
-     *
-     * For: long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE $srcreg, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    $instr                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE $tgtreg, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWider.S b/runtime/interpreter/mterp/arm64/funopWider.S
deleted file mode 100644
index 4c91ebc..0000000
--- a/runtime/interpreter/mterp/arm64/funopWider.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG $srcreg, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    $instr                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE $tgtreg, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
deleted file mode 100644
index 0722804..0000000
--- a/runtime/interpreter/mterp/arm64/header.S
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat xFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via xFP &
-  number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
-  r0     : w0 is 32-bit return register and x0 is 64-bit.
-  r0-r7  : Argument registers.
-  r8-r15 : Caller save registers (used as temporary registers).
-  r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
-           the linker, by the trampolines and other stubs (the backend uses
-           these as temporary registers).
-  r18    : Caller save register (used as temporary register).
-  r19    : Pointer to thread-local storage.
-  r20-r29: Callee save registers.
-  r30    : (lr) is reserved (the link register).
-  rsp    : (sp) is reserved (the stack pointer).
-  rzr    : (zr) is reserved (the zero register).
-
-  Floating-point registers
-  v0-v31
-
-  v0     : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
-           This is analogous to the C/C++ (hard-float) calling convention.
-  v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
-           Also used as temporary and codegen scratch registers.
-
-  v0-v7 and v16-v31 : trashed across C calls.
-  v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
-  v16-v31: Used as codegen temp/scratch.
-  v8-v15 : Can be used for promotion.
-
-  Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  x20  xPC       interpreted program counter, used for fetching instructions
-  x21  xFP       interpreted frame pointer, used for accessing locals and args
-  x22  xSELF     self (Thread) pointer
-  x23  xINST     first 16-bit code unit of current instruction
-  x24  xIBASE    interpreted instruction base pointer, used for computed goto
-  x25  xREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  x26  wPROFILE  jit profile hotness countdown
-  x16  ip        scratch reg
-  x17  ip2       scratch reg (used by macros)
-
-Macros are provided for common operations.  They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC      x20
-#define CFI_DEX  20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
-#define xFP      x21
-#define xSELF    x22
-#define xINST    x23
-#define wINST    w23
-#define xIBASE   x24
-#define xREFS    x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip       x16
-#define ip2      x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    str  xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-/*
- * Fetch the next instruction from xPC into wINST.  Does not advance xPC.
- */
-.macro FETCH_INST
-    ldrh    wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances xPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ldrh    wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
-    ldrh    \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC.  Used to load
- * xINST ahead of possible exception point.  Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
-    ldrh    wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
-  add  xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    add     xPC, xPC, \reg, sxtw
-    ldrh    wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
-    ldrh    \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
-    ldrsh   \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
-    ldrb     \reg, [xPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
-    and     \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg.  Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
-    add     \reg, xIBASE, \reg, lsl #${handler_size_bits}
-    br      \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
-    add     \reg, \base, \reg, lsl #${handler_size_bits}
-    br      \reg
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
-    ldr     \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
-    str     \reg, [xFP, \vreg, uxtw #2]
-    str     wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
-    str     \reg, [xFP, \vreg, uxtw #2]
-    str     \reg, [xREFS, \vreg, uxtw #2]
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
- */
-.macro GET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
-    ldr     \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
-    str     \reg, [ip2]
-    add     ip2, xREFS, \vreg, lsl #2
-    str     xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
-    ldrsw   \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
-    add     \reg, xFP, \vreg, lsl #2   /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-  ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
-    stp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_rel_offset \reg1, (\offset)
-    .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
-    ldp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_restore \reg1
-    .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
-    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
-    .cfi_adjust_cfa_offset (\frame_adjustment)
-    .cfi_rel_offset \reg1, 0
-    .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
-    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
-    .cfi_restore \reg1
-    .cfi_restore \reg2
-    .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
-    .type \name, #function
-    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-.endm
-
-.macro END name
-    .cfi_endproc
-    .size \name, .-\name
-.endm
diff --git a/runtime/interpreter/mterp/arm64/instruction_end.S b/runtime/interpreter/mterp/arm64/instruction_end.S
deleted file mode 100644
index f90ebd0..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmInstructionEnd, #object
-    .hidden artMterpAsmInstructionEnd
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_alt.S b/runtime/interpreter/mterp/arm64/instruction_end_alt.S
deleted file mode 100644
index 0b66dbb..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmAltInstructionEnd, #object
-    .hidden artMterpAsmAltInstructionEnd
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_sister.S b/runtime/interpreter/mterp/arm64/instruction_end_sister.S
deleted file mode 100644
index 71c0300..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .type artMterpAsmSisterEnd, #object
-    .hidden artMterpAsmSisterEnd
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_start.S b/runtime/interpreter/mterp/arm64/instruction_start.S
deleted file mode 100644
index b7e9cf5..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    .type artMterpAsmInstructionStart, #object
-    .hidden artMterpAsmInstructionStart
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_alt.S b/runtime/interpreter/mterp/arm64/instruction_start_alt.S
deleted file mode 100644
index 7a67ba0..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    .type artMterpAsmAltInstructionStart, #object
-    .hidden artMterpAsmAltInstructionStart
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_sister.S b/runtime/interpreter/mterp/arm64/instruction_start_sister.S
deleted file mode 100644
index 0036061..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
-    .type artMterpAsmSisterStart, #object
-    .hidden artMterpAsmSisterStart
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
index 7a32df7..4844213 100644
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
     /*
      * Generic invoke handler wrapper.
      */
@@ -13,8 +13,98 @@
     bl      $helper
     cbz     w0, MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, xPC
+    mov     x3, xINST
+    bl      $helper
+    cbz     w0, MterpException
+    FETCH_ADVANCE_INST 4
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/arm64/invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
deleted file mode 100644
index 7906f0a..0000000
--- a/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      $helper
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
new file mode 100644
index 0000000..0cfbbff
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -0,0 +1,797 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame.
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation.
+
+  Some (hopefully) temporary ugliness.  We'll treat xFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via xFP &
+  number_of_vregs_.
+
+ */
+
+/*
+ARM64 Runtime register usage conventions.
+
+  r0     : w0 is 32-bit return register and x0 is 64-bit.
+  r0-r7  : Argument registers.
+  r8-r15 : Caller save registers (used as temporary registers).
+  r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
+           the linker, by the trampolines and other stubs (the backend uses
+           these as temporary registers).
+  r18    : Caller save register (used as temporary register).
+  r19    : Pointer to thread-local storage.
+  r20-r29: Callee save registers.
+  r30    : (lr) is reserved (the link register).
+  rsp    : (sp) is reserved (the stack pointer).
+  rzr    : (zr) is reserved (the zero register).
+
+  Floating-point registers
+  v0-v31
+
+  v0     : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
+           This is analogous to the C/C++ (hard-float) calling convention.
+  v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
+           Also used as temporary and codegen scratch registers.
+
+  v0-v7 and v16-v31 : trashed across C calls.
+  v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
+
+  v16-v31: Used as codegen temp/scratch.
+  v8-v15 : Can be used for promotion.
+
+  Must maintain 16-byte stack alignment.
+
+Mterp notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  x20  xPC       interpreted program counter, used for fetching instructions
+  x21  xFP       interpreted frame pointer, used for accessing locals and args
+  x22  xSELF     self (Thread) pointer
+  x23  xINST     first 16-bit code unit of current instruction
+  x24  xIBASE    interpreted instruction base pointer, used for computed goto
+  x25  xREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
+  x26  wPROFILE  jit profile hotness countdown
+  x16  ip        scratch reg
+  x17  ip2       scratch reg (used by macros)
+
+Macros are provided for common operations.  They MUST NOT alter unspecified registers or condition
+codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/* During bringup, we'll use the shadow frame model instead of xFP */
+/* single-purpose registers, given names for clarity */
+#define xPC      x20
+#define CFI_DEX  20 // DWARF register number of the register holding dex-pc (xPC).
+#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
+#define xFP      x21
+#define xSELF    x22
+#define xINST    x23
+#define wINST    w23
+#define xIBASE   x24
+#define xREFS    x25
+#define wPROFILE w26
+#define xPROFILE x26
+#define ip       x16
+#define ip2      x17
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For effiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    str  xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+/*
+ * Fetch the next instruction from xPC into wINST.  Does not advance xPC.
+ */
+.macro FETCH_INST
+    ldrh    wINST, [xPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances xPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+    ldrh    wINST, [xPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to xPC and xINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+    ldrh    \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update xPC.  Used to load
+ * xINST ahead of possible exception point.  Be sure to manually advance xPC
+ * later.
+ */
+.macro PREFETCH_INST count
+    ldrh    wINST, [xPC, #((\count)*2)]
+.endm
+
+/* Advance xPC by some number of code units. */
+.macro ADVANCE count
+  add  xPC, xPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg and advance xPC.
+ * xPC to point to the next instruction.  "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
+ *
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+    add     xPC, xPC, \reg, sxtw
+    ldrh    wINST, [xPC]
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance xPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+    ldrh    \reg, [xPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+    ldrsh   \reg, [xPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+    ldrb     \reg, [xPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+    and     \reg, xINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+    and     \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.  Clobbers reg
+ */
+
+.macro GOTO_OPCODE reg
+    add     \reg, xIBASE, \reg, lsl #${handler_size_bits}
+    br      \reg
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+    add     \reg, \base, \reg, lsl #${handler_size_bits}
+    br      \reg
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+    ldr     \reg, [xFP, \vreg, uxtw #2]
+.endm
+.macro SET_VREG reg, vreg
+    str     \reg, [xFP, \vreg, uxtw #2]
+    str     wzr, [xREFS, \vreg, uxtw #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+    str     \reg, [xFP, \vreg, uxtw #2]
+    str     \reg, [xREFS, \vreg, uxtw #2]
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+    str     \reg, [xFP, \vreg, uxtw #2]
+    str     wzr, [xREFS, \vreg, uxtw #2]
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ */
+.macro GET_VREG_WIDE reg, vreg
+    add     ip2, xFP, \vreg, lsl #2
+    ldr     \reg, [ip2]
+.endm
+.macro SET_VREG_WIDE reg, vreg
+    add     ip2, xFP, \vreg, lsl #2
+    str     \reg, [ip2]
+    add     ip2, xREFS, \vreg, lsl #2
+    str     xzr, [ip2]
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+    add     ip2, xFP, \vreg, lsl #2
+    ldr     \reg, [ip2]
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+    add     ip2, xFP, \vreg, lsl #2
+    str     \reg, [ip2]
+    add     ip2, xREFS, \vreg, lsl #2
+    str     xzr, [ip2]
+.endm
+
+/*
+ * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
+ * Used to avoid an extra instruction in int-to-long.
+ */
+.macro GET_VREG_S reg, vreg
+    ldrsw   \reg, [xFP, \vreg, uxtw #2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+    add     \reg, xFP, \vreg, lsl #2   /* WARNING: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+  ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/*
+ * Save two registers to the stack.
+ */
+.macro SAVE_TWO_REGS reg1, reg2, offset
+    stp \reg1, \reg2, [sp, #(\offset)]
+    .cfi_rel_offset \reg1, (\offset)
+    .cfi_rel_offset \reg2, (\offset) + 8
+.endm
+
+/*
+ * Restore two registers from the stack.
+ */
+.macro RESTORE_TWO_REGS reg1, reg2, offset
+    ldp \reg1, \reg2, [sp, #(\offset)]
+    .cfi_restore \reg1
+    .cfi_restore \reg2
+.endm
+
+/*
+ * Increase frame size and save two registers to the bottom of the stack.
+ */
+.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
+    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
+    .cfi_adjust_cfa_offset (\frame_adjustment)
+    .cfi_rel_offset \reg1, 0
+    .cfi_rel_offset \reg2, 8
+.endm
+
+/*
+ * Restore two registers from the bottom of the stack and decrease frame size.
+ */
+.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
+    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
+    .cfi_restore \reg1
+    .cfi_restore \reg2
+    .cfi_adjust_cfa_offset -(\frame_adjustment)
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+    .type \name, #function
+    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
+    .global \name
+    /* Cache alignment for function entry */
+    .balign 16
+\name:
+.endm
+
+.macro END name
+    .size \name, .-\name
+.endm
+
+// Macro to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+    neg \rRef, \rRef
+#endif  // USE_HEAP_POISONING
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+    .text
+
+/*
+ * Interpreter entry point.
+ * On entry:
+ *  x0  Thread* self/
+ *  x1  insns_
+ *  x2  ShadowFrame
+ *  x3  JValue* result_register
+ *
+ */
+ENTRY ExecuteMterpImpl
+    .cfi_startproc
+    SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
+    SAVE_TWO_REGS                xIBASE, xREFS, 16
+    SAVE_TWO_REGS                xSELF, xINST, 32
+    SAVE_TWO_REGS                xPC, xFP, 48
+    SAVE_TWO_REGS                fp, lr, 64
+    add     fp, sp, #64
+
+    /* Remember the return register */
+    str     x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+    /* Remember the dex instruction pointer */
+    str     x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
+
+    /* set up "named" registers */
+    mov     xSELF, x0
+    ldr     w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET     // point to vregs.
+    add     xREFS, xFP, w0, lsl #2                 // point to reference array in shadow frame
+    ldr     w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET]   // Get starting dex_pc.
+    add     xPC, x1, w0, lsl #1                    // Create direct pointer to 1st dex opcode
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+    EXPORT_PC
+
+    /* Starting ibase */
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+    /* Set up for backwards branches & osr profiling */
+    ldr     x0, [xFP, #OFF_FP_METHOD]
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, xSELF
+    bl      MterpSetUpHotnessCountdown
+    mov     wPROFILE, w0                // Starting hotness countdown to xPROFILE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST                          // load wINST from rPC
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+    /* NOTE: no fallthrough */
+    // cfi info continues, and covers the whole mterp implementation.
+    END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+    // Call C++ to do debug checks and return to the handler using tail call.
+    .extern MterpCheckBefore
+    mov    x0, xSELF                    // arg0: Thread* self
+    add    x1, xFP, #OFF_FP_SHADOWFRAME // arg1: ShadowFrame*
+    mov    x2, xPC                      // arg2: uint16_t* dex_pc_ptr
+    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
+
+%def opcode_pre():
+%  add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+    #if !defined(NDEBUG)
+    bl     Mterp_dchecks_before_helper
+    #endif
+
+%def footer():
+    .cfi_endproc
+    END MterpHelpers
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+
+%def helpers():
+    ENTRY MterpHelpers
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogDivideByZeroException
+#endif
+    b MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogArrayIndexException
+#endif
+    b MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNegativeArraySizeException
+#endif
+    b MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNoSuchMethodException
+#endif
+    b MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogNullObjectException
+#endif
+    b MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogExceptionThrownException
+#endif
+    b MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    ldr  x2, [xSELF, #THREAD_FLAGS_OFFSET]
+    bl MterpLogSuspendFallback
+#endif
+    b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    ldr     x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    cbz     x0, MterpFallback                       // If not, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    bl      MterpHandleException                    // (self, shadow_frame)
+    cbz     w0, MterpExceptionReturn                // no local catch, back to caller.
+    ldr     x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
+    ldr     w1, [xFP, #OFF_FP_DEX_PC]
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+    add     xPC, x0, x1, lsl #1                     // generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC
+    FETCH_INST
+    GET_INST_OPCODE ip
+    GOTO_OPCODE ip
+    /* NOTE: no fallthrough */
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    wINST          <= signed offset
+ *    wPROFILE       <= signed hotness countdown (expanded to 32 bits)
+ *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+    cmp     wINST, #0
+    b.gt    .L_forward_branch           // don't add forward branches to hotness
+    tbnz    wPROFILE, #31, .L_no_count_backwards  // go if negative
+    subs    wPROFILE, wPROFILE, #1      // countdown
+    b.eq    .L_add_batch                // counted down to zero - report
+.L_resume_backward_branch:
+    ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
+    add     w2, wINST, wINST            // w2<- byte offset
+    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
+    REFRESH_IBASE
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    .L_suspend_request_pending
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+.L_suspend_request_pending:
+    EXPORT_PC
+    mov     x0, xSELF
+    bl      MterpSuspendCheck           // (self)
+    cbnz    x0, MterpFallback
+    REFRESH_IBASE                       // might have changed during suspend
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+.L_no_count_backwards:
+    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
+    b.ne    .L_resume_backward_branch
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, xINST
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
+    cbnz    x0, MterpOnStackReplacement
+    b       .L_resume_backward_branch
+
+.L_forward_branch:
+    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
+    b.eq    .L_check_osr_forward
+.L_resume_forward_branch:
+    add     w2, wINST, wINST            // w2<- byte offset
+    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+.L_check_osr_forward:
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, xINST
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
+    cbnz    x0, MterpOnStackReplacement
+    b       .L_resume_forward_branch
+
+.L_add_batch:
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+    ldr     x0, [xFP, #OFF_FP_METHOD]
+    mov     x2, xSELF
+    bl      MterpAddHotnessBatch        // (method, shadow_frame, self)
+    mov     wPROFILE, w0                // restore new hotness countdown to wPROFILE
+    b       .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, #2
+    EXPORT_PC
+    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
+    cbnz    x0, MterpOnStackReplacement
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+/*
+ * Check for suspend check request.  Assumes wINST already loaded, xPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in w7.
+ */
+MterpCheckSuspendAndContinue:
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    b.ne    check1
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+check1:
+    EXPORT_PC
+    mov     x0, xSELF
+    bl      MterpSuspendCheck           // (self)
+    cbnz    x0, MterpFallback           // Something in the environment changed, switch interpreters
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    sxtw x2, wINST
+    bl MterpLogOSR
+#endif
+    mov  x0, #1                         // Signal normal return
+    b    MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    mov  x0, xSELF
+    add  x1, xFP, #OFF_FP_SHADOWFRAME
+    bl MterpLogFallback
+#endif
+MterpCommonFallback:
+    mov     x0, #0                                  // signal retry with reference interpreter.
+    b       MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR.  Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ *  uint32_t* xFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    mov     x0, #1                                  // signal return to caller.
+    b MterpDone
+MterpReturn:
+    ldr     x2, [xFP, #OFF_FP_RESULT_REGISTER]
+    str     x0, [x2]
+    mov     x0, #1                                  // signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect wPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending wPROFILE and the cached hotness counter).  wPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    cmp     wPROFILE, #0
+    bgt     MterpProfileActive                      // if > 0, we may have some counts to report.
+    .cfi_remember_state
+    RESTORE_TWO_REGS                fp, lr, 64
+    RESTORE_TWO_REGS                xPC, xFP, 48
+    RESTORE_TWO_REGS                xSELF, xINST, 32
+    RESTORE_TWO_REGS                xIBASE, xREFS, 16
+    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
+    ret
+    .cfi_restore_state                              // Reset unwind info so following code unwinds.
+    .cfi_def_cfa_offset 80                          // workaround for clang bug: 31975598
+
+MterpProfileActive:
+    mov     xINST, x0                               // stash return value
+    /* Report cached hotness counts */
+    ldr     x0, [xFP, #OFF_FP_METHOD]
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    mov     x2, xSELF
+    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+    bl      MterpAddHotnessBatch                    // (method, shadow_frame, self)
+    mov     x0, xINST                               // restore return value
+    RESTORE_TWO_REGS                fp, lr, 64
+    RESTORE_TWO_REGS                xPC, xFP, 48
+    RESTORE_TWO_REGS                xSELF, xINST, 32
+    RESTORE_TWO_REGS                xIBASE, xREFS, 16
+    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
+    ret
+
+
+%def instruction_end():
+
+    .type artMterpAsmInstructionEnd, #object
+    .hidden artMterpAsmInstructionEnd
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_start():
+
+    .type artMterpAsmInstructionStart, #object
+    .hidden artMterpAsmInstructionStart
+    .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+%def opcode_start():
+    ENTRY Mterp_${opcode}
+%def opcode_end():
+    END Mterp_${opcode}
+%def helper_start(name):
+    ENTRY ${name}
+%def helper_end(name):
+    END ${name}
diff --git a/runtime/interpreter/mterp/arm64/object.S b/runtime/interpreter/mterp/arm64/object.S
new file mode 100644
index 0000000..3cc688e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/object.S
@@ -0,0 +1,308 @@
+%def field(helper=""):
+    /*
+     * General field read / write (iget-* iput-* sget-* sput-*).
+     */
+    .extern $helper
+    mov      x0, xPC                       // arg0: Instruction* inst
+    mov      x1, xINST                     // arg1: uint16_t inst_data
+    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
+    mov      x3, xSELF                     // arg3: Thread* self
+    PREFETCH_INST 2                        // prefetch next opcode
+    bl       $helper
+    cbz      x0, MterpPossibleException
+    ADVANCE 2
+    GET_INST_OPCODE ip                     // extract opcode from rINST
+    GOTO_OPCODE ip                         // jump to next instruction
+
+%def op_check_cast():
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class//BBBB */
+    EXPORT_PC
+    FETCH    w0, 1                      // w0<- BBBB
+    lsr      w1, wINST, #8              // w1<- AA
+    VREG_INDEX_TO_ADDR x1, w1           // x1<- &object
+    ldr      x2, [xFP, #OFF_FP_METHOD]  // x2<- method
+    mov      x3, xSELF                  // x3<- self
+    bl       MterpCheckCast             // (index, &obj, method, self)
+    PREFETCH_INST 2
+    cbnz     w0, MterpPossibleException
+    ADVANCE  2
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
+   // Fast-path which gets the field offset from thread-local cache.
+   add      x0, xSELF, #THREAD_INTERPRETER_CACHE_OFFSET       // cache address
+   ubfx     x1, xPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2  // entry index
+   add      x0, x0, x1, lsl #4            // entry address within the cache
+   ldp      x0, x1, [x0]                  // entry key (pc) and value (offset)
+   lsr      w2, wINST, #12                // B
+   GET_VREG w2, w2                        // object we're operating on
+   cmp      x0, xPC
+%  slow_path_label = add_helper(lambda: field(helper))
+   b.ne     ${slow_path_label}            // cache miss
+   cbz      w2, common_errNullObject      // null object
+%  if is_wide:
+     ldr      x0, [x2, x1]                // x0<- obj.field
+%  else:
+     ${load}  w0, [x2, x1]                // w0<- obj.field
+%  #endif
+%  if is_object:
+     UNPOISON_HEAP_REF w0
+#if defined(USE_READ_BARRIER)
+# if defined(USE_BAKER_READ_BARRIER)
+     ldr    w1, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+     cbnz   w1, .L_${opcode}_mark         // GC is active.
+.L_${opcode}_marked:
+# else
+     bl artReadBarrierMark                // x0 <- artReadBarrierMark(x0)
+# endif
+#endif
+%  #endif
+   ubfx     w2, wINST, #8, #4             // w2<- A
+   FETCH_ADVANCE_INST 2                   // advance rPC, load rINST
+%  if is_object:
+     SET_VREG_OBJECT w0, w2               // fp[A]<- w0
+%  elif is_wide:
+     SET_VREG_WIDE x0, w2                 // fp[A]<- x0
+%  else:
+     SET_VREG w0, w2                      // fp[A]<- w0
+%  #endif
+   GET_INST_OPCODE ip                     // extract opcode from rINST
+   GOTO_OPCODE ip                         // jump to next instruction
+%  if is_object:
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+.L_${opcode}_mark:
+     bl artReadBarrierMark                // x0 <- artReadBarrierMark(x0)
+     b .L_${opcode}_marked
+#endif
+%  #endif
+
+%def op_iget_boolean():
+%  op_iget(load="ldrb", helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="ldrb")
+
+%def op_iget_byte():
+%  op_iget(load="ldrsb", helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="ldrsb")
+
+%def op_iget_char():
+%  op_iget(load="ldrh", helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="ldrh")
+
+%def op_iget_object():
+%  op_iget(is_object=True, helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset//CCCC */
+    lsr     w2, wINST, #12              // w2<- B
+    FETCH w1, 1                         // w1<- field byte offset
+    EXPORT_PC
+    GET_VREG w0, w2                     // w0<- object we're operating on
+    bl      artIGetObjectFromMterp      // (obj, offset)
+    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    PREFETCH_INST 2
+    cbnz    w3, MterpPossibleException      // bail out
+    SET_VREG_OBJECT w0, w2              // fp[A]<- w0
+    ADVANCE 2                           // advance rPC
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iget_quick(load="ldr", extend=""):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    lsr     w2, wINST, #12              // w2<- B
+    FETCH w1, 1                         // w1<- field byte offset
+    GET_VREG w3, w2                     // w3<- object we're operating on
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+    $load   w0, [x3, x1]                // w0<- obj.field
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $extend
+    SET_VREG w0, w2                     // fp[A]<- w0
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iget_short():
+%  op_iget(load="ldrsh", helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="ldrsh")
+
+%def op_iget_wide():
+%  op_iget(is_wide=True, helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+    /* iget-wide-quick vA, vB, offset//CCCC */
+    lsr     w2, wINST, #12              // w2<- B
+    FETCH w4, 1                         // w4<- field byte offset
+    GET_VREG w3, w2                     // w3<- object we're operating on
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+    ldr     x0, [x3, x4]                // x0<- obj.field
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    SET_VREG_WIDE x0, w2
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_instance_of():
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class//CCCC */
+    EXPORT_PC
+    FETCH     w0, 1                     // w0<- CCCC
+    lsr       w1, wINST, #12            // w1<- B
+    VREG_INDEX_TO_ADDR x1, w1           // x1<- &object
+    ldr       x2, [xFP, #OFF_FP_METHOD] // x2<- method
+    mov       x3, xSELF                 // x3<- self
+    bl        MterpInstanceOf           // (index, &obj, method, self)
+    ldr       x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    ubfx      w2, wINST, #8, #4         // w2<- A
+    PREFETCH_INST 2
+    cbnz      x1, MterpException
+    ADVANCE 2                           // advance rPC
+    SET_VREG w0, w2                     // vA<- w0
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iput(helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(store="strb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(store="strb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(store="strh")
+
+%def op_iput_object():
+%  op_iput(helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    EXPORT_PC
+    add     x0, xFP, #OFF_FP_SHADOWFRAME
+    mov     x1, xPC
+    mov     w2, wINST
+    bl      MterpIputObjectQuick
+    cbz     w0, MterpException
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iput_quick(store="str"):
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    lsr     w2, wINST, #12              // w2<- B
+    FETCH w1, 1                         // w1<- field byte offset
+    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+    GET_VREG w0, w2                     // w0<- fp[A]
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    $store     w0, [x3, x1]             // obj.field<- w0
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(store="strh")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset//CCCC */
+    lsr     w2, wINST, #12              // w2<- B
+    FETCH w3, 1                         // w3<- field byte offset
+    GET_VREG w2, w2                     // w2<- fp[B], the object pointer
+    ubfx    w0, wINST, #8, #4           // w0<- A
+    cbz     w2, common_errNullObject    // object was null
+    GET_VREG_WIDE x0, w0                // x0<- fp[A]
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    str     x0, [x2, x3]                // obj.field<- x0
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_new_instance():
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class//BBBB */
+    EXPORT_PC
+    add     x0, xFP, #OFF_FP_SHADOWFRAME
+    mov     x1, xSELF
+    mov     w2, wINST
+    bl      MterpNewInstance           // (shadow_frame, self, inst_data)
+    cbz     w0, MterpPossibleException
+    FETCH_ADVANCE_INST 2               // advance rPC, load rINST
+    GET_INST_OPCODE ip                 // extract opcode from rINST
+    GOTO_OPCODE ip                     // jump to next instruction
+
+%def op_sget(helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/arm64/op_add_double.S b/runtime/interpreter/mterp/arm64/op_add_double.S
deleted file mode 100644
index 8509f70..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fadd d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_double_2addr.S b/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
deleted file mode 100644
index 61fd58f..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fadd     d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float.S b/runtime/interpreter/mterp/arm64/op_add_float.S
deleted file mode 100644
index 7d09fef..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fadd   s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float_2addr.S b/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
deleted file mode 100644
index 7b378e2..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fadd   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int.S b/runtime/interpreter/mterp/arm64/op_add_int.S
deleted file mode 100644
index 6eadb54..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"add     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_2addr.S b/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
deleted file mode 100644
index d35bc8e..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"add     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit16.S b/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
deleted file mode 100644
index 4930ad7..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"add     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
deleted file mode 100644
index 2dfb8b9..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"add     w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long.S b/runtime/interpreter/mterp/arm64/op_add_long.S
deleted file mode 100644
index bc334aa..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"add x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long_2addr.S b/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
deleted file mode 100644
index 5e5dbce..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"add     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aget.S b/runtime/interpreter/mterp/arm64/op_aget.S
deleted file mode 100644
index 662c9cc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #$shift    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $load   w2, [x0, #$data_offset]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aget_boolean.S b/runtime/interpreter/mterp/arm64/op_aget_boolean.S
deleted file mode 100644
index 6ab6cc1..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_byte.S b/runtime/interpreter/mterp/arm64/op_aget_byte.S
deleted file mode 100644
index c7f5b23..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_char.S b/runtime/interpreter/mterp/arm64/op_aget_char.S
deleted file mode 100644
index 9fddf17..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_object.S b/runtime/interpreter/mterp/arm64/op_aget_object.S
deleted file mode 100644
index 1bbe3e8..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_object.S
+++ /dev/null
@@ -1,20 +0,0 @@
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    FETCH_B w3, 1, 1                    // w3<- CC
-    EXPORT_PC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    bl       artAGetObjectFromMterp     // (array, index)
-    ldr      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    lsr      w2, wINST, #8               // w9<- AA
-    PREFETCH_INST 2
-    cbnz     w1, MterpException
-    SET_VREG_OBJECT w0, w2
-    ADVANCE 2
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aget_short.S b/runtime/interpreter/mterp/arm64/op_aget_short.S
deleted file mode 100644
index 39554de..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_wide.S b/runtime/interpreter/mterp/arm64/op_aget_wide.S
deleted file mode 100644
index 6f990ba..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject        // yes, bail
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    ldr     x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  // x2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x2, w4
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_and_int.S b/runtime/interpreter/mterp/arm64/op_and_int.S
deleted file mode 100644
index 31f3f73..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"and     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_2addr.S b/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
deleted file mode 100644
index e59632c..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"and     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit16.S b/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
deleted file mode 100644
index 6540f81..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"and     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
deleted file mode 100644
index 495b5cd..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"and     w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long.S b/runtime/interpreter/mterp/arm64/op_and_long.S
deleted file mode 100644
index ede047d..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"and x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long_2addr.S b/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
deleted file mode 100644
index d62ccef..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"and     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aput.S b/runtime/interpreter/mterp/arm64/op_aput.S
deleted file mode 100644
index 175b483..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #$shift     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    $store  w2, [x0, #$data_offset]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_boolean.S b/runtime/interpreter/mterp/arm64/op_aput_boolean.S
deleted file mode 100644
index 5e7a86f..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_byte.S b/runtime/interpreter/mterp/arm64/op_aput_byte.S
deleted file mode 100644
index d659ebc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_char.S b/runtime/interpreter/mterp/arm64/op_aput_char.S
deleted file mode 100644
index 7547c80..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_object.S b/runtime/interpreter/mterp/arm64/op_aput_object.S
deleted file mode 100644
index 0146fdc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    bl      MterpAputObject
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_short.S b/runtime/interpreter/mterp/arm64/op_aput_short.S
deleted file mode 100644
index 8631e28..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_wide.S b/runtime/interpreter/mterp/arm64/op_aput_wide.S
deleted file mode 100644
index e1cf9c1..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    GET_VREG_WIDE x1, w4
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    str     x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_array_length.S b/runtime/interpreter/mterp/arm64/op_array_length.S
deleted file mode 100644
index 0cce917..0000000
--- a/runtime/interpreter/mterp/arm64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /*
-     * Return the length of an array.
-     */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w0, w1                     // w0<- vB (object ref)
-    cbz     w0, common_errNullObject    // yup, fail
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- array length
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w3, w2                     // vB<- length
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_check_cast.S b/runtime/interpreter/mterp/arm64/op_check_cast.S
deleted file mode 100644
index cb9f606..0000000
--- a/runtime/interpreter/mterp/arm64/op_check_cast.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    EXPORT_PC
-    FETCH    w0, 1                      // w0<- BBBB
-    lsr      w1, wINST, #8              // w1<- AA
-    VREG_INDEX_TO_ADDR x1, w1           // w1<- &object
-    ldr      x2, [xFP, #OFF_FP_METHOD]  // w2<- method
-    mov      x3, xSELF                  // w3<- self
-    bl       MterpCheckCast             // (index, &obj, method, self)
-    PREFETCH_INST 2
-    cbnz     w0, MterpPossibleException
-    ADVANCE  2
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_cmp_long.S b/runtime/interpreter/mterp/arm64/op_cmp_long.S
deleted file mode 100644
index c4ad984..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmp_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG_WIDE x1, w2
-    GET_VREG_WIDE x2, w3
-    cmp     x1, x2
-    cset    w0, ne
-    cneg    w0, w0, lt
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    SET_VREG w0, w4
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_double.S b/runtime/interpreter/mterp/arm64/op_cmpg_double.S
deleted file mode 100644
index 30cb7eb..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "cond":"cc"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_float.S b/runtime/interpreter/mterp/arm64/op_cmpg_float.S
deleted file mode 100644
index ba23f43..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "cond":"cc"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_double.S b/runtime/interpreter/mterp/arm64/op_cmpl_double.S
deleted file mode 100644
index c739685..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "cond":"lt"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_float.S b/runtime/interpreter/mterp/arm64/op_cmpl_float.S
deleted file mode 100644
index 32a9319..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "cond":"lt"}
diff --git a/runtime/interpreter/mterp/arm64/op_const.S b/runtime/interpreter/mterp/arm64/op_const.S
deleted file mode 100644
index 031ede1..0000000
--- a/runtime/interpreter/mterp/arm64/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const vAA, #+BBBBbbbb */
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH w0, 1                         // w0<- bbbb (low
-    FETCH w1, 2                         // w1<- BBBB (high
-    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
-    orr     w0, w0, w1, lsl #16         // w0<- BBBBbbbb
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG w0, w3                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_16.S b/runtime/interpreter/mterp/arm64/op_const_16.S
deleted file mode 100644
index f0e8192..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/16 vAA, #+BBBB */
-    FETCH_S w0, 1                       // w0<- ssssBBBB (sign-extended)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_ADVANCE_INST 2                // advance xPC, load wINST
-    SET_VREG w0, w3                     // vAA<- w0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_4.S b/runtime/interpreter/mterp/arm64/op_const_4.S
deleted file mode 100644
index 9a36115..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/4 vA, #+B */
-    sbfx    w1, wINST, #12, #4          // w1<- sssssssB
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    FETCH_ADVANCE_INST 1                // advance xPC, load wINST
-    GET_INST_OPCODE ip                  // ip<- opcode from xINST
-    SET_VREG w1, w0                     // fp[A]<- w1
-    GOTO_OPCODE ip                      // execute next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_class.S b/runtime/interpreter/mterp/arm64/op_const_class.S
deleted file mode 100644
index 7228245..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_high16.S b/runtime/interpreter/mterp/arm64/op_const_high16.S
deleted file mode 100644
index 3a9edff..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const/high16 vAA, #+BBBB0000 */
-    FETCH   w0, 1                       // r0<- 0000BBBB (zero-extended)
-    lsr     w3, wINST, #8               // r3<- AA
-    lsl     w0, w0, #16                 // r0<- BBBB0000
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    SET_VREG w0, w3                     // vAA<- r0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_handle.S b/runtime/interpreter/mterp/arm64/op_const_method_handle.S
deleted file mode 100644
index 0df0fa6..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_type.S b/runtime/interpreter/mterp/arm64/op_const_method_type.S
deleted file mode 100644
index 1adfe5a..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_string.S b/runtime/interpreter/mterp/arm64/op_const_string.S
deleted file mode 100644
index 8cf0d6d..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
deleted file mode 100644
index e1a7339..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* const/string vAA, String//BBBBBBBB */
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- bbbb (low
-    FETCH w2, 2                         // w2<- BBBB (high
-    lsr     w1, wINST, #8               // w1<- AA
-    orr     w0, w0, w2, lsl #16         // w1<- BBBBbbbb
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstString            // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     // advance rPC
-    cbnz    w0, MterpPossibleException      // let reference interpreter deal with it.
-    ADVANCE 3                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide.S b/runtime/interpreter/mterp/arm64/op_const_wide.S
deleted file mode 100644
index 8f57dda..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    FETCH w0, 1                         // w0<- bbbb (low)
-    FETCH w1, 2                         // w1<- BBBB (low middle)
-    FETCH w2, 3                         // w2<- hhhh (high middle)
-    FETCH w3, 4                         // w3<- HHHH (high)
-    lsr     w4, wINST, #8               // r4<- AA
-    FETCH_ADVANCE_INST 5                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    orr     w0, w0, w1, lsl #16         // w0<-         BBBBbbbb
-    orr     x0, x0, x2, lsl #32         // w0<-     hhhhBBBBbbbb
-    orr     x0, x0, x3, lsl #48         // w0<- HHHHhhhhBBBBbbbb
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_16.S b/runtime/interpreter/mterp/arm64/op_const_wide_16.S
deleted file mode 100644
index 553d481..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const-wide/16 vAA, #+BBBB */
-    FETCH_S x0, 1                       // x0<- ssssssssssssBBBB (sign-extended)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_32.S b/runtime/interpreter/mterp/arm64/op_const_wide_32.S
deleted file mode 100644
index 9dc4fc3..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (low)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_S x2, 2                       // x2<- ssssssssssssBBBB (high)
-    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    orr     x0, x0, x2, lsl #16         // x0<- ssssssssBBBBbbbb
-    SET_VREG_WIDE x0, w3
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_high16.S b/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
deleted file mode 100644
index 94ab987..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    FETCH w0, 1                         // w0<- 0000BBBB (zero-extended)
-    lsr     w1, wINST, #8               // w1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    lsl     x0, x0, #48
-    SET_VREG_WIDE x0, w1
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_div_double.S b/runtime/interpreter/mterp/arm64/op_div_double.S
deleted file mode 100644
index 1f7dad0..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fdiv d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_double_2addr.S b/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
deleted file mode 100644
index 414a175..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fdiv     d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float.S b/runtime/interpreter/mterp/arm64/op_div_float.S
deleted file mode 100644
index f24a26c..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fdiv   s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float_2addr.S b/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
deleted file mode 100644
index 2888049..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fdiv   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int.S b/runtime/interpreter/mterp/arm64/op_div_int.S
deleted file mode 100644
index 88371c0..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"sdiv     w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_2addr.S b/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
deleted file mode 100644
index 5f5a80f..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"sdiv     w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit16.S b/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
deleted file mode 100644
index dc7a484..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit8.S b/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
deleted file mode 100644
index c06521c..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"instr":"sdiv     w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long.S b/runtime/interpreter/mterp/arm64/op_div_long.S
deleted file mode 100644
index 820ae3d..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"sdiv x0, x1, x2", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long_2addr.S b/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
deleted file mode 100644
index da7eabd..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"sdiv     x0, x0, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_float.S b/runtime/interpreter/mterp/arm64/op_double_to_float.S
deleted file mode 100644
index c1555fd..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"fcvt s0, d0", "srcreg":"d0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_int.S b/runtime/interpreter/mterp/arm64/op_double_to_int.S
deleted file mode 100644
index 7244bac..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"fcvtzs w0, d0", "srcreg":"d0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_long.S b/runtime/interpreter/mterp/arm64/op_double_to_long.S
deleted file mode 100644
index 741160b..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWide.S" {"instr":"fcvtzs x0, d0", "srcreg":"d0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_fill_array_data.S b/runtime/interpreter/mterp/arm64/op_fill_array_data.S
deleted file mode 100644
index 86fa6db..0000000
--- a/runtime/interpreter/mterp/arm64/op_fill_array_data.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
-    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
-    lsr     w3, wINST, #8               // w3<- AA
-    orr     x1, x0, x1, lsl #16         // x1<- ssssssssBBBBbbbb
-    GET_VREG w0, w3                     // w0<- vAA (array object)
-    add     x1, xPC, x1, lsl #1         // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
-    bl      MterpFillArrayData          // (obj, payload)
-    cbz     w0, MterpPossibleException      // exception?
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array.S b/runtime/interpreter/mterp/arm64/op_filled_new_array.S
deleted file mode 100644
index 806a1b1..0000000
--- a/runtime/interpreter/mterp/arm64/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern $helper
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     x2, xSELF
-    bl      $helper
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
deleted file mode 100644
index 3c9a419..0000000
--- a/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_double.S b/runtime/interpreter/mterp/arm64/op_float_to_double.S
deleted file mode 100644
index 892feca..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"fcvt  d0, s0", "srcreg":"s0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_int.S b/runtime/interpreter/mterp/arm64/op_float_to_int.S
deleted file mode 100644
index c849d81..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrow.S" {"instr":"fcvtzs w0, s0", "srcreg":"s0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_long.S b/runtime/interpreter/mterp/arm64/op_float_to_long.S
deleted file mode 100644
index c3de16f..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"fcvtzs x0, s0", "srcreg":"s0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_goto.S b/runtime/interpreter/mterp/arm64/op_goto.S
deleted file mode 100644
index 6381e94..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sbfx    wINST, wINST, #8, #8           // wINST<- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_goto_16.S b/runtime/interpreter/mterp/arm64/op_goto_16.S
deleted file mode 100644
index fb9a80a..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S wINST, 1                    // wINST<- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_goto_32.S b/runtime/interpreter/mterp/arm64/op_goto_32.S
deleted file mode 100644
index b13cb41..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto_32.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".  Because
-     * we need the V bit set, we'll use an adds to convert from Dalvik
-     * offset to byte offset.
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH w0, 1                         // w0<- aaaa (lo)
-    FETCH w1, 2                         // w1<- AAAA (hi)
-    orr     wINST, w0, w1, lsl #16      // wINST<- AAAAaaaa
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_if_eq.S b/runtime/interpreter/mterp/arm64/op_if_eq.S
deleted file mode 100644
index aa4a0f1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_eqz.S b/runtime/interpreter/mterp/arm64/op_if_eqz.S
deleted file mode 100644
index 47c1dee..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"cbz     w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ge.S b/runtime/interpreter/mterp/arm64/op_if_ge.S
deleted file mode 100644
index d6ec761..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gez.S b/runtime/interpreter/mterp/arm64/op_if_gez.S
deleted file mode 100644
index 087e094..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"tbz     w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gt.S b/runtime/interpreter/mterp/arm64/op_if_gt.S
deleted file mode 100644
index 7db8e9d..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gtz.S b/runtime/interpreter/mterp/arm64/op_if_gtz.S
deleted file mode 100644
index 476b265..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "branch":"b.gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_le.S b/runtime/interpreter/mterp/arm64/op_if_le.S
deleted file mode 100644
index ca3a83f..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lez.S b/runtime/interpreter/mterp/arm64/op_if_lez.S
deleted file mode 100644
index 2717a60..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "branch":"b.le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lt.S b/runtime/interpreter/mterp/arm64/op_if_lt.S
deleted file mode 100644
index 56450a1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ltz.S b/runtime/interpreter/mterp/arm64/op_if_ltz.S
deleted file mode 100644
index 86089c1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"tbnz    w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ne.S b/runtime/interpreter/mterp/arm64/op_if_ne.S
deleted file mode 100644
index 14d9e13..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_nez.S b/runtime/interpreter/mterp/arm64/op_if_nez.S
deleted file mode 100644
index efacc88..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"cbnz    w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_iget.S b/runtime/interpreter/mterp/arm64/op_iget.S
deleted file mode 100644
index 48b9cad..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "arm64/field.S" { }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean.S b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
deleted file mode 100644
index 9a83b2a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
deleted file mode 100644
index 2ceccb9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte.S b/runtime/interpreter/mterp/arm64/op_iget_byte.S
deleted file mode 100644
index f73e634..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
deleted file mode 100644
index 6e97b72..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char.S b/runtime/interpreter/mterp/arm64/op_iget_char.S
deleted file mode 100644
index a5efd9e..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char_quick.S b/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
deleted file mode 100644
index 325dd1c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object.S b/runtime/interpreter/mterp/arm64/op_iget_object.S
deleted file mode 100644
index 40ddadd..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object_quick.S b/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
deleted file mode 100644
index e9a797d..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    EXPORT_PC
-    GET_VREG w0, w2                     // w0<- object we're operating on
-    bl      artIGetObjectFromMterp      // (obj, offset)
-    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    PREFETCH_INST 2
-    cbnz    w3, MterpPossibleException      // bail out
-    SET_VREG_OBJECT w0, w2              // fp[A]<- w0
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_quick.S b/runtime/interpreter/mterp/arm64/op_iget_quick.S
deleted file mode 100644
index 699b2c4..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"ldr", "extend":"" }
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    $load   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $extend
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short.S b/runtime/interpreter/mterp/arm64/op_iget_short.S
deleted file mode 100644
index bb81c17..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short_quick.S b/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
deleted file mode 100644
index 8367070..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide.S b/runtime/interpreter/mterp/arm64/op_iget_wide.S
deleted file mode 100644
index 70061d6..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
deleted file mode 100644
index e9388e4..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w4, 1                         // w4<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldr     x0, [x3, x4]                // x0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    SET_VREG_WIDE x0, w2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_instance_of.S b/runtime/interpreter/mterp/arm64/op_instance_of.S
deleted file mode 100644
index a56705a..0000000
--- a/runtime/interpreter/mterp/arm64/op_instance_of.S
+++ /dev/null
@@ -1,22 +0,0 @@
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    EXPORT_PC
-    FETCH     w0, 1                     // w0<- CCCC
-    lsr       w1, wINST, #12            // w1<- B
-    VREG_INDEX_TO_ADDR x1, w1           // w1<- &object
-    ldr       x2, [xFP, #OFF_FP_METHOD] // w2<- method
-    mov       x3, xSELF                 // w3<- self
-    bl        MterpInstanceOf           // (index, &obj, method, self)
-    ldr       x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx      w2, wINST, #8, #4         // w2<- A
-    PREFETCH_INST 2
-    cbnz      x1, MterpException
-    ADVANCE 2                           // advance rPC
-    SET_VREG w0, w2                     // vA<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_byte.S b/runtime/interpreter/mterp/arm64/op_int_to_byte.S
deleted file mode 100644
index 43f8148..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sxtb    w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_char.S b/runtime/interpreter/mterp/arm64/op_int_to_char.S
deleted file mode 100644
index f092170..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"uxth    w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_double.S b/runtime/interpreter/mterp/arm64/op_int_to_double.S
deleted file mode 100644
index 3dee75a..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"scvtf d0, w0", "srcreg":"w0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_float.S b/runtime/interpreter/mterp/arm64/op_int_to_float.S
deleted file mode 100644
index 3ebbdc7..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrow.S" {"instr":"scvtf s0, w0", "srcreg":"w0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_long.S b/runtime/interpreter/mterp/arm64/op_int_to_long.S
deleted file mode 100644
index 45e3112..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* int-to-long vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_S x0, w3                   // x0<- sign_extend(fp[B])
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4                // fp[A]<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_short.S b/runtime/interpreter/mterp/arm64/op_int_to_short.S
deleted file mode 100644
index 87fb804..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sxth    w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom.S b/runtime/interpreter/mterp/arm64/op_invoke_custom.S
deleted file mode 100644
index 3686584..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
deleted file mode 100644
index 06de86a..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct.S b/runtime/interpreter/mterp/arm64/op_invoke_direct.S
deleted file mode 100644
index c117232..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
deleted file mode 100644
index efc54c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface.S b/runtime/interpreter/mterp/arm64/op_invoke_interface.S
deleted file mode 100644
index 12dfa59..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeInterface" }
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
deleted file mode 100644
index 61caaf4..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
deleted file mode 100644
index aace98f..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 30c8c09..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static.S b/runtime/interpreter/mterp/arm64/op_invoke_static.S
deleted file mode 100644
index 634eda2..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static_range.S b/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
deleted file mode 100644
index 32cdcdd..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super.S b/runtime/interpreter/mterp/arm64/op_invoke_super.S
deleted file mode 100644
index def2c55..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeSuper" }
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super_range.S b/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
deleted file mode 100644
index 27fb859..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
deleted file mode 100644
index 66d0502..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtual" }
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
deleted file mode 100644
index 4300c34..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
deleted file mode 100644
index b43955c..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 90c7b65..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput.S b/runtime/interpreter/mterp/arm64/op_iput.S
deleted file mode 100644
index 2bc3db9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "arm64/field.S" { }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean.S b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
deleted file mode 100644
index 12a278c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
deleted file mode 100644
index 25c61d7..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte.S b/runtime/interpreter/mterp/arm64/op_iput_byte.S
deleted file mode 100644
index 82b99e9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
deleted file mode 100644
index 25c61d7..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char.S b/runtime/interpreter/mterp/arm64/op_iput_char.S
deleted file mode 100644
index 427d92d..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char_quick.S b/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
deleted file mode 100644
index c6ef46a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object.S b/runtime/interpreter/mterp/arm64/op_iput_object.S
deleted file mode 100644
index e9bb93f..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object_quick.S b/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
deleted file mode 100644
index 6fbf2b1..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    bl      MterpIputObjectQuick
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_quick.S b/runtime/interpreter/mterp/arm64/op_iput_quick.S
deleted file mode 100644
index e95da76..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "store":"str" }
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $store     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short.S b/runtime/interpreter/mterp/arm64/op_iput_short.S
deleted file mode 100644
index 67f1ace..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short_quick.S b/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
deleted file mode 100644
index c6ef46a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S
deleted file mode 100644
index e1fafad..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
deleted file mode 100644
index 28e831a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w3, 1                         // w3<- field byte offset
-    GET_VREG w2, w2                     // w2<- fp[B], the object pointer
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    cbz     w2, common_errNullObject    // object was null
-    GET_VREG_WIDE x0, w0                // x0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    str     x0, [x2, x3]                // obj.field<- x0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_double.S b/runtime/interpreter/mterp/arm64/op_long_to_double.S
deleted file mode 100644
index a3f59c2..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWide.S" {"instr":"scvtf d0, x0", "srcreg":"x0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_float.S b/runtime/interpreter/mterp/arm64/op_long_to_float.S
deleted file mode 100644
index e9c9145..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"scvtf s0, x0", "srcreg":"x0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_int.S b/runtime/interpreter/mterp/arm64/op_long_to_int.S
deleted file mode 100644
index 73f58d8..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "arm64/op_move.S"
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_enter.S b/runtime/interpreter/mterp/arm64/op_monitor_enter.S
deleted file mode 100644
index 6fbd9ae..0000000
--- a/runtime/interpreter/mterp/arm64/op_monitor_enter.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8               // w2<- AA
-    GET_VREG w0, w2                      // w0<- vAA (object)
-    mov      x1, xSELF                   // w1<- self
-    bl       artLockObjectFromCode
-    cbnz     w0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE ip                   // extract opcode from rINST
-    GOTO_OPCODE ip                       // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_exit.S b/runtime/interpreter/mterp/arm64/op_monitor_exit.S
deleted file mode 100644
index 26e2d8d..0000000
--- a/runtime/interpreter/mterp/arm64/op_monitor_exit.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8              // w2<- AA
-    GET_VREG w0, w2                     // w0<- vAA (object)
-    mov      x1, xSELF                  // w0<- self
-    bl       artUnlockObjectFromCode    // w0<- success for unlock(self, obj)
-    cbnz     w0, MterpException
-    FETCH_ADVANCE_INST 1                // before throw: advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move.S b/runtime/interpreter/mterp/arm64/op_move.S
deleted file mode 100644
index 195b7eb..0000000
--- a/runtime/interpreter/mterp/arm64/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    lsr     w1, wINST, #12              // x1<- B from 15:12
-    ubfx    w0, wINST, #8, #4           // x0<- A from 11:8
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_VREG w2, w1                     // x2<- fp[B]
-    GET_INST_OPCODE ip                  // ip<- opcode from wINST
-    .if $is_object
-    SET_VREG_OBJECT w2, w0              // fp[A]<- x2
-    .else
-    SET_VREG w2, w0                     // fp[A]<- x2
-    .endif
-    GOTO_OPCODE ip                      // execute next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_16.S b/runtime/interpreter/mterp/arm64/op_move_16.S
deleted file mode 100644
index 5146e3d..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH w1, 2                         // w1<- BBBB
-    FETCH w0, 1                         // w0<- AAAA
-    FETCH_ADVANCE_INST 3                // advance xPC, load xINST
-    GET_VREG w2, w1                     // w2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from xINST
-    .if $is_object
-    SET_VREG_OBJECT w2, w0              // fp[AAAA]<- w2
-    .else
-    SET_VREG w2, w0                     // fp[AAAA]<- w2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_exception.S b/runtime/interpreter/mterp/arm64/op_move_exception.S
deleted file mode 100644
index b29298f..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-exception vAA */
-    lsr     w2, wINST, #8               // w2<- AA
-    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    mov     x1, #0                      // w1<- 0
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    SET_VREG_OBJECT w3, w2              // fp[AA]<- exception obj
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    str     x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // clear exception
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_from16.S b/runtime/interpreter/mterp/arm64/op_move_from16.S
deleted file mode 100644
index 78f344d..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH w1, 1                         // r1<- BBBB
-    lsr     w0, wINST, #8               // r0<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_VREG w2, w1                     // r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if $is_object
-    SET_VREG_OBJECT w2, w0              // fp[AA]<- r2
-    .else
-    SET_VREG w2, w0                     // fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_object.S b/runtime/interpreter/mterp/arm64/op_move_object.S
deleted file mode 100644
index a5adc59..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_16.S b/runtime/interpreter/mterp/arm64/op_move_object_16.S
deleted file mode 100644
index ef86c45..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_from16.S b/runtime/interpreter/mterp/arm64/op_move_object_from16.S
deleted file mode 100644
index 0c73b3b..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result.S b/runtime/interpreter/mterp/arm64/op_move_result.S
deleted file mode 100644
index 06fe962..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    lsr     w2, wINST, #8               // r2<- AA
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
-    ldr     w0, [x0]                    // r0 <- result.i.
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if $is_object
-    SET_VREG_OBJECT w0, w2, w1          // fp[AA]<- r0
-    .else
-    SET_VREG w0, w2                     // fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_object.S b/runtime/interpreter/mterp/arm64/op_move_result_object.S
deleted file mode 100644
index da2bbee..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_wide.S b/runtime/interpreter/mterp/arm64/op_move_result_wide.S
deleted file mode 100644
index f90a33f..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* for: move-result-wide */
-    /* op vAA */
-    lsr     w2, wINST, #8               // r2<- AA
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
-    ldr     x0, [x0]                    // r0 <- result.i.
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, x2                // fp[AA]<- r0
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide.S b/runtime/interpreter/mterp/arm64/op_move_wide.S
deleted file mode 100644
index 538f079..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE  x3, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE  x3, w2
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_16.S b/runtime/interpreter/mterp/arm64/op_move_wide_16.S
deleted file mode 100644
index c79cdc50..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH w3, 2                         // w3<- BBBB
-    FETCH w2, 1                         // w2<- AAAA
-    GET_VREG_WIDE x3, w3
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    SET_VREG_WIDE x3, w2
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_from16.S b/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
deleted file mode 100644
index 70dbe99..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH w3, 1                         // w3<- BBBB
-    lsr     w2, wINST, #8               // w2<- AA
-    GET_VREG_WIDE x3, w3
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x3, w2
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double.S b/runtime/interpreter/mterp/arm64/op_mul_double.S
deleted file mode 100644
index 8d35b81..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fmul d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
deleted file mode 100644
index 526cb3b..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fmul     d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float.S b/runtime/interpreter/mterp/arm64/op_mul_float.S
deleted file mode 100644
index eea7733..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fmul   s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
deleted file mode 100644
index c1f2376..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fmul   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int.S b/runtime/interpreter/mterp/arm64/op_mul_int.S
deleted file mode 100644
index d14cae1..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binop.S" {"instr":"mul     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
deleted file mode 100644
index f079118..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binop2addr.S" {"instr":"mul     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
deleted file mode 100644
index a378559..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binopLit16.S" {"instr":"mul     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
deleted file mode 100644
index b3d4014..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binopLit8.S" {"instr":"mul     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long.S b/runtime/interpreter/mterp/arm64/op_mul_long.S
deleted file mode 100644
index bc0dcbd..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"mul x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
deleted file mode 100644
index fa1cdf8..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"mul     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_double.S b/runtime/interpreter/mterp/arm64/op_neg_double.S
deleted file mode 100644
index d77859d..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"eor     x0, x0, #0x8000000000000000"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_float.S b/runtime/interpreter/mterp/arm64/op_neg_float.S
deleted file mode 100644
index 6652aec..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"eor     w0, w0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_int.S b/runtime/interpreter/mterp/arm64/op_neg_int.S
deleted file mode 100644
index 59c14a9..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sub     w0, wzr, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_long.S b/runtime/interpreter/mterp/arm64/op_neg_long.S
deleted file mode 100644
index 0c71ea7..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"sub x0, xzr, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_new_array.S b/runtime/interpreter/mterp/arm64/op_new_array.S
deleted file mode 100644
index 886120a..0000000
--- a/runtime/interpreter/mterp/arm64/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    mov     x3, xSELF
-    bl      MterpNewArray
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_new_instance.S b/runtime/interpreter/mterp/arm64/op_new_instance.S
deleted file mode 100644
index c171ac5..0000000
--- a/runtime/interpreter/mterp/arm64/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xSELF
-    mov     w2, wINST
-    bl      MterpNewInstance           // (shadow_frame, self, inst_data)
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2               // advance rPC, load rINST
-    GET_INST_OPCODE ip                 // extract opcode from rINST
-    GOTO_OPCODE ip                     // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_nop.S b/runtime/interpreter/mterp/arm64/op_nop.S
deleted file mode 100644
index 80c2d45..0000000
--- a/runtime/interpreter/mterp/arm64/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
-    FETCH_ADVANCE_INST 1                // advance to next instr, load rINST
-    GET_INST_OPCODE ip                  // ip<- opcode from rINST
-    GOTO_OPCODE ip                      // execute it
diff --git a/runtime/interpreter/mterp/arm64/op_not_int.S b/runtime/interpreter/mterp/arm64/op_not_int.S
deleted file mode 100644
index 55d7750..0000000
--- a/runtime/interpreter/mterp/arm64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"mvn     w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_not_long.S b/runtime/interpreter/mterp/arm64/op_not_long.S
deleted file mode 100644
index e5ebdd6..0000000
--- a/runtime/interpreter/mterp/arm64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"mvn     x0, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int.S b/runtime/interpreter/mterp/arm64/op_or_int.S
deleted file mode 100644
index 648c1e6..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"orr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_2addr.S b/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
deleted file mode 100644
index abdf599..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"orr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit16.S b/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
deleted file mode 100644
index db7f4ff..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"orr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
deleted file mode 100644
index 7cb26b7..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"orr     w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long.S b/runtime/interpreter/mterp/arm64/op_or_long.S
deleted file mode 100644
index dd137ce..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"orr x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long_2addr.S b/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
deleted file mode 100644
index f785230..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"orr     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_packed_switch.S b/runtime/interpreter/mterp/arm64/op_packed_switch.S
deleted file mode 100644
index 408e030..0000000
--- a/runtime/interpreter/mterp/arm64/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
-    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
-    lsr     w3, wINST, #8               // w3<- AA
-    orr     x0, x0, x1, lsl #16         // x0<- ssssssssBBBBbbbb
-    GET_VREG w1, w3                     // w1<- vAA
-    add     x0, xPC, x0, lsl #1         // x0<- PC + ssssssssBBBBbbbb*2
-    bl      $func                       // w0<- code-unit branch offset
-    sxtw    xINST, w0
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double.S b/runtime/interpreter/mterp/arm64/op_rem_double.S
deleted file mode 100644
index c631ddb..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_double.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* rem vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d1, w2                // d1<- vCC
-    GET_VREG_WIDE d0, w1                // d0<- vBB
-    bl  fmod
-    lsr     w4, wINST, #8               // w4<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4                // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
deleted file mode 100644
index 9868f41..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* rem vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1                // d1<- vB
-    GET_VREG_WIDE d0, w2                // d0<- vA
-    bl fmod
-    ubfx    w2, wINST, #8, #4           // w2<- A (need to reload - killed across call)
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2                // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float.S b/runtime/interpreter/mterp/arm64/op_rem_float.S
deleted file mode 100644
index 73f7060..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_float.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm64/fbinop.S" {"instr":"bl      fmodf"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
deleted file mode 100644
index 95f81c5..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* rem vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    bl  fmodf
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s0, w9
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int.S b/runtime/interpreter/mterp/arm64/op_rem_int.S
deleted file mode 100644
index dd9dfda..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"preinstr":"sdiv     w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
deleted file mode 100644
index 57fc4971..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"preinstr":"sdiv     w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
deleted file mode 100644
index b51a739..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
deleted file mode 100644
index 03ea324..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long.S b/runtime/interpreter/mterp/arm64/op_rem_long.S
deleted file mode 100644
index f133f86..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"preinstr":"sdiv x3, x1, x2","instr":"msub x0, x3, x2, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
deleted file mode 100644
index b45e2a9..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"preinstr":"sdiv x3, x0, x1", "instr":"msub x0, x3, x1, x0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_return.S b/runtime/interpreter/mterp/arm64/op_return.S
deleted file mode 100644
index 9f125c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_return.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L${opcode}_check
-.L${opcode}_return:
-    lsr     w2, wINST, #8               // r2<- AA
-    GET_VREG w0, w2                     // r0<- vAA
-    b       MterpReturn
-.L${opcode}_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_object.S b/runtime/interpreter/mterp/arm64/op_return_object.S
deleted file mode 100644
index b6cb532..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_return.S"
diff --git a/runtime/interpreter/mterp/arm64/op_return_void.S b/runtime/interpreter/mterp/arm64/op_return_void.S
deleted file mode 100644
index b253006..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_void.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L${opcode}_check
-.L${opcode}_return:
-    mov     x0, #0
-    b       MterpReturn
-.L${opcode}_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
deleted file mode 100644
index c817169..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L${opcode}_check
-.L${opcode}_return:
-    mov     x0, #0
-    b       MterpReturn
-.L${opcode}_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_wide.S b/runtime/interpreter/mterp/arm64/op_return_wide.S
deleted file mode 100644
index c47661c..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_wide.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L${opcode}_check
-.L${opcode}_return:
-    lsr     w2, wINST, #8               // w2<- AA
-    GET_VREG_WIDE x0, w2                // x0<- vAA
-    b       MterpReturn
-.L${opcode}_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int.S b/runtime/interpreter/mterp/arm64/op_rsub_int.S
deleted file mode 100644
index 3bf45fe..0000000
--- a/runtime/interpreter/mterp/arm64/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "arm64/binopLit16.S" {"instr":"sub     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
deleted file mode 100644
index 7a3572b..0000000
--- a/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"instr":"sub     w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
deleted file mode 100644
index 78e95b2..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "arm64/field.S" { }
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
deleted file mode 100644
index 0cf9f09..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
deleted file mode 100644
index 7c88a81..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
deleted file mode 100644
index 883e944..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
deleted file mode 100644
index 69d6adb..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
deleted file mode 100644
index 6cb9184..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
deleted file mode 100644
index f5d182e..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int.S b/runtime/interpreter/mterp/arm64/op_shl_int.S
deleted file mode 100644
index 3062a3f..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"lsl     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
deleted file mode 100644
index 9a7e09f..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"lsl     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
deleted file mode 100644
index 9c19b55..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx    w1, w3, #8, #5", "instr":"lsl     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long.S b/runtime/interpreter/mterp/arm64/op_shl_long.S
deleted file mode 100644
index bbf9600..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
deleted file mode 100644
index a5c4013..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int.S b/runtime/interpreter/mterp/arm64/op_shr_int.S
deleted file mode 100644
index 493b740..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"asr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
deleted file mode 100644
index 6efe8ee..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"asr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
deleted file mode 100644
index c7b61df..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx    w1, w3, #8, #5", "instr":"asr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long.S b/runtime/interpreter/mterp/arm64/op_shr_long.S
deleted file mode 100644
index 4d33235..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
deleted file mode 100644
index 0a4a386..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_sparse_switch.S b/runtime/interpreter/mterp/arm64/op_sparse_switch.S
deleted file mode 100644
index 5a8d748..0000000
--- a/runtime/interpreter/mterp/arm64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
deleted file mode 100644
index d229d0d..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "arm64/field.S" { }
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
deleted file mode 100644
index 3d0c7c0..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
deleted file mode 100644
index 489cf92..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
deleted file mode 100644
index f79d311..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_object.S b/runtime/interpreter/mterp/arm64/op_sput_object.S
deleted file mode 100644
index 536f1b1..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
deleted file mode 100644
index 06482cd..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
deleted file mode 100644
index b4be6b2..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double.S b/runtime/interpreter/mterp/arm64/op_sub_double.S
deleted file mode 100644
index e8e3401..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fsub d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
deleted file mode 100644
index ddab55e..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fsub     d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float.S b/runtime/interpreter/mterp/arm64/op_sub_float.S
deleted file mode 100644
index 227b15f..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fsub   s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
deleted file mode 100644
index 19ac8d5..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fsub   s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int.S b/runtime/interpreter/mterp/arm64/op_sub_int.S
deleted file mode 100644
index 0e7ce0e..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"sub     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
deleted file mode 100644
index d2c1bd3..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"sub     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long.S b/runtime/interpreter/mterp/arm64/op_sub_long.S
deleted file mode 100644
index 263c70d..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"sub x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
deleted file mode 100644
index 5be3772..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"sub     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_throw.S b/runtime/interpreter/mterp/arm64/op_throw.S
deleted file mode 100644
index 9a951af..0000000
--- a/runtime/interpreter/mterp/arm64/op_throw.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8               // r2<- AA
-    GET_VREG w1, w2                      // r1<- vAA (exception object)
-    cbz      w1, common_errNullObject
-    str      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // thread->exception<- obj
-    b        MterpException
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3e.S b/runtime/interpreter/mterp/arm64/op_unused_3e.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3f.S b/runtime/interpreter/mterp/arm64/op_unused_3f.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_40.S b/runtime/interpreter/mterp/arm64/op_unused_40.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_41.S b/runtime/interpreter/mterp/arm64/op_unused_41.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_42.S b/runtime/interpreter/mterp/arm64/op_unused_42.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_43.S b/runtime/interpreter/mterp/arm64/op_unused_43.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_73.S b/runtime/interpreter/mterp/arm64/op_unused_73.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_79.S b/runtime/interpreter/mterp/arm64/op_unused_79.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_7a.S b/runtime/interpreter/mterp/arm64/op_unused_7a.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f3.S b/runtime/interpreter/mterp/arm64/op_unused_f3.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f4.S b/runtime/interpreter/mterp/arm64/op_unused_f4.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f5.S b/runtime/interpreter/mterp/arm64/op_unused_f5.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f6.S b/runtime/interpreter/mterp/arm64/op_unused_f6.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f7.S b/runtime/interpreter/mterp/arm64/op_unused_f7.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f8.S b/runtime/interpreter/mterp/arm64/op_unused_f8.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f9.S b/runtime/interpreter/mterp/arm64/op_unused_f9.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fc.S b/runtime/interpreter/mterp/arm64/op_unused_fc.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fd.S b/runtime/interpreter/mterp/arm64/op_unused_fd.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int.S b/runtime/interpreter/mterp/arm64/op_ushr_int.S
deleted file mode 100644
index 005452b..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"lsr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
deleted file mode 100644
index 1cb8cb7..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"lsr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
deleted file mode 100644
index 555ed4e..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx    w1, w3, #8, #5", "instr":"lsr     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long.S b/runtime/interpreter/mterp/arm64/op_ushr_long.S
deleted file mode 100644
index e13c86a..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
deleted file mode 100644
index 67ec91e..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int.S b/runtime/interpreter/mterp/arm64/op_xor_int.S
deleted file mode 100644
index 7483663..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"eor     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
deleted file mode 100644
index 2f9a2c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"eor     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
deleted file mode 100644
index 6b72c56..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"eor     w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
deleted file mode 100644
index 1d3d93e..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"eor     w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long.S b/runtime/interpreter/mterp/arm64/op_xor_long.S
deleted file mode 100644
index 3880d5d..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"eor x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
deleted file mode 100644
index 3690552..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"eor     x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/other.S b/runtime/interpreter/mterp/arm64/other.S
new file mode 100644
index 0000000..f1d0ef3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/other.S
@@ -0,0 +1,359 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@BBBB */
+    .extern $helper
+    EXPORT_PC
+    FETCH w0, 1                         // w0<- BBBB
+    lsr     w1, wINST, #8               // w1<- AA
+    add     x2, xFP, #OFF_FP_SHADOWFRAME
+    mov     x3, xSELF
+    bl      $helper                     // (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     // load rINST
+    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
+    ADVANCE 2                           // advance rPC
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    lsr     w3, wINST, #8               // w3<- AA
+    FETCH w0, 1                         // w0<- bbbb (low)
+    FETCH w1, 2                         // w1<- BBBB (high)
+    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
+    orr     w0, w0, w1, lsl #16         // w0<- BBBBbbbb
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG w0, w3                     // vAA<- w0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    FETCH_S w0, 1                       // w0<- ssssBBBB (sign-extended)
+    lsr     w3, wINST, #8               // w3<- AA
+    FETCH_ADVANCE_INST 2                // advance xPC, load wINST
+    SET_VREG w0, w3                     // vAA<- w0
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    sbfx    w1, wINST, #12, #4          // w1<- sssssssB
+    ubfx    w0, wINST, #8, #4           // w0<- A
+    FETCH_ADVANCE_INST 1                // advance xPC, load wINST
+    GET_INST_OPCODE ip                  // ip<- opcode from xINST
+    SET_VREG w1, w0                     // fp[A]<- w1
+    GOTO_OPCODE ip                      // execute next instruction
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    FETCH   w0, 1                       // r0<- 0000BBBB (zero-extended)
+    lsr     w3, wINST, #8               // r3<- AA
+    lsl     w0, w0, #16                 // r0<- BBBB0000
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    SET_VREG w0, w3                     // vAA<- r0
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, String//BBBBBBBB */
+    EXPORT_PC
+    FETCH w0, 1                         // w0<- bbbb (low)
+    FETCH w2, 2                         // w2<- BBBB (high)
+    lsr     w1, wINST, #8               // w1<- AA
+    orr     w0, w0, w2, lsl #16         // w0<- BBBBbbbb
+    add     x2, xFP, #OFF_FP_SHADOWFRAME
+    mov     x3, xSELF
+    bl      MterpConstString            // (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     // advance rPC
+    cbnz    w0, MterpPossibleException      // let reference interpreter deal with it.
+    ADVANCE 3                           // advance rPC
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    FETCH w0, 1                         // w0<- bbbb (low)
+    FETCH w1, 2                         // w1<- BBBB (low middle)
+    FETCH w2, 3                         // w2<- hhhh (high middle)
+    FETCH w3, 4                         // w3<- HHHH (high)
+    lsr     w4, wINST, #8               // r4<- AA
+    FETCH_ADVANCE_INST 5                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    orr     w0, w0, w1, lsl #16         // w0<-         BBBBbbbb
+    orr     x0, x0, x2, lsl #32         // w0<-     hhhhBBBBbbbb
+    orr     x0, x0, x3, lsl #48         // w0<- HHHHhhhhBBBBbbbb
+    SET_VREG_WIDE x0, w4
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    FETCH_S x0, 1                       // x0<- ssssssssssssBBBB (sign-extended)
+    lsr     w3, wINST, #8               // w3<- AA
+    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    SET_VREG_WIDE x0, w3
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    FETCH   w0, 1                       // x0<- 000000000000bbbb (low)
+    lsr     w3, wINST, #8               // w3<- AA
+    FETCH_S x2, 2                       // x2<- ssssssssssssBBBB (high)
+    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    orr     x0, x0, x2, lsl #16         // x0<- ssssssssBBBBbbbb
+    SET_VREG_WIDE x0, w3
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    FETCH w0, 1                         // w0<- 0000BBBB (zero-extended)
+    lsr     w1, wINST, #8               // w1<- AA
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    lsl     x0, x0, #48
+    SET_VREG_WIDE x0, w1
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_monitor_enter():
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    lsr      w2, wINST, #8               // w2<- AA
+    GET_VREG w0, w2                      // w0<- vAA (object)
+    mov      x1, xSELF                   // w1<- self
+    bl       artLockObjectFromCode
+    cbnz     w0, MterpException
+    FETCH_ADVANCE_INST 1
+    ldr      w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz      w0, MterpFallback
+    GET_INST_OPCODE ip                   // extract opcode from rINST
+    GOTO_OPCODE ip                       // jump to next instruction
+
+%def op_monitor_exit():
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    lsr      w2, wINST, #8              // w2<- AA
+    GET_VREG w0, w2                     // w0<- vAA (object)
+    mov      x1, xSELF                  // x1<- self
+    bl       artUnlockObjectFromCode    // w0<- success for unlock(self, obj)
+    cbnz     w0, MterpException
+    FETCH_ADVANCE_INST 1                // before throw: advance rPC, load rINST
+    ldr      w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz      w0, MterpFallback
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    lsr     w1, wINST, #12              // x1<- B from 15:12
+    ubfx    w0, wINST, #8, #4           // x0<- A from 11:8
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    GET_VREG w2, w1                     // x2<- fp[B]
+    GET_INST_OPCODE ip                  // ip<- opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w2, w0              // fp[A]<- x2
+    .else
+    SET_VREG w2, w0                     // fp[A]<- x2
+    .endif
+    GOTO_OPCODE ip                      // execute next instruction
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH w1, 2                         // w1<- BBBB
+    FETCH w0, 1                         // w0<- AAAA
+    FETCH_ADVANCE_INST 3                // advance xPC, load xINST
+    GET_VREG w2, w1                     // w2<- fp[BBBB]
+    GET_INST_OPCODE ip                  // extract opcode from xINST
+    .if $is_object
+    SET_VREG_OBJECT w2, w0              // fp[AAAA]<- w2
+    .else
+    SET_VREG w2, w0                     // fp[AAAA]<- w2
+    .endif
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_exception():
+    /* move-exception vAA */
+    lsr     w2, wINST, #8               // w2<- AA
+    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    mov     x1, #0                      // w1<- 0
+    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
+    SET_VREG_OBJECT w3, w2              // fp[AA]<- exception obj
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    str     x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // clear exception
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH w1, 1                         // r1<- BBBB
+    lsr     w0, wINST, #8               // r0<- AA
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    GET_VREG w2, w1                     // r2<- fp[BBBB]
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w2, w0              // fp[AA]<- r2
+    .else
+    SET_VREG w2, w0                     // fp[AA]<- r2
+    .endif
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    lsr     w2, wINST, #8               // r2<- AA
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
+    ldr     w0, [x0]                    // r0 <- result.i.
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w0, w2, w1          // fp[AA]<- r0
+    .else
+    SET_VREG w0, w2                     // fp[AA]<- r0
+    .endif
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* for: move-result-wide */
+    /* op vAA */
+    lsr     w2, wINST, #8               // r2<- AA
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
+    ldr     x0, [x0]                    // r0 <- result.i.
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE x0, x2                // fp[AA]<- r0
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lsr     w3, wINST, #12              // w3<- B
+    ubfx    w2, wINST, #8, #4           // w2<- A
+    GET_VREG_WIDE  x3, w3
+    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE  x3, w2
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH w3, 2                         // w3<- BBBB
+    FETCH w2, 1                         // w2<- AAAA
+    GET_VREG_WIDE x3, w3
+    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
+    SET_VREG_WIDE x3, w2
+    GET_INST_OPCODE ip                  // extract opcode from rINST
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH w3, 1                         // w3<- BBBB
+    lsr     w2, wINST, #8               // w2<- AA
+    GET_VREG_WIDE x3, w3
+    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    SET_VREG_WIDE x3, w2
+    GOTO_OPCODE ip                      // jump to next instruction
+
+%def op_nop():
+    FETCH_ADVANCE_INST 1                // advance to next instr, load rINST
+    GET_INST_OPCODE ip                  // ip<- opcode from rINST
+    GOTO_OPCODE ip                      // execute it
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_73():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/arm64/shiftWide.S b/runtime/interpreter/mterp/arm64/shiftWide.S
deleted file mode 100644
index dcb2fb7..0000000
--- a/runtime/interpreter/mterp/arm64/shiftWide.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"opcode":"shl"}
-    /*
-     * 64-bit shift operation.
-     *
-     * For: shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr      w3, wINST, #8               // w3<- AA
-    lsr      w2, w0, #8                  // w2<- CC
-    GET_VREG w2, w2                     // w2<- vCC (shift count)
-    and      w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x1, w1                // x1<- vBB
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    $opcode  x0, x1, x2                 // Do the shift. Only low 6 bits of x2 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3                // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/shiftWide2addr.S b/runtime/interpreter/mterp/arm64/shiftWide2addr.S
deleted file mode 100644
index b860dfd..0000000
--- a/runtime/interpreter/mterp/arm64/shiftWide2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default {"opcode":"lsl"}
-    /*
-     * Generic 64-bit shift operation.
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w1, w1                     // x1<- vB
-    GET_VREG_WIDE x0, w2                // x0<- vA
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    $opcode x0, x0, x1                  // Do the shift. Only low 6 bits of x1 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unop.S b/runtime/interpreter/mterp/arm64/unop.S
deleted file mode 100644
index e681968..0000000
--- a/runtime/interpreter/mterp/arm64/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    $instr                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unopWide.S b/runtime/interpreter/mterp/arm64/unopWide.S
deleted file mode 100644
index 6ee4f92..0000000
--- a/runtime/interpreter/mterp/arm64/unopWide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"instr":"sub x0, xzr, x0"}
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op x0".
-     *
-     * For: neg-long, not-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    $instr
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unused.S b/runtime/interpreter/mterp/arm64/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/arm64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
diff --git a/runtime/interpreter/mterp/arm64/zcmp.S b/runtime/interpreter/mterp/arm64/zcmp.S
deleted file mode 100644
index 510a3c1..0000000
--- a/runtime/interpreter/mterp/arm64/zcmp.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "compare":"1" }
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if ${compare}
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    ${branch} MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
diff --git a/runtime/interpreter/mterp/common/gen_setup.py b/runtime/interpreter/mterp/common/gen_setup.py
new file mode 100644
index 0000000..cfa5e2e
--- /dev/null
+++ b/runtime/interpreter/mterp/common/gen_setup.py
@@ -0,0 +1,90 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Common global variables and helper methods for the in-memory python script.
+# The script starts with this file and is followed by the code generated from
+# the templated snippets. Those define all the helper functions used below.
+
+import sys, re
+from cStringIO import StringIO
+
+out = StringIO()  # File-like in-memory buffer.
+handler_size_bytes = "MTERP_HANDLER_SIZE"
+handler_size_bits = "MTERP_HANDLER_SIZE_LOG2"
+opcode = ""
+opnum = ""
+
+def write_line(line):
+  out.write(line + "\n")
+
+def balign():
+  write_line("    .balign {}".format(handler_size_bytes))
+
+def write_opcode(num, name, write_method):
+  global opnum, opcode
+  opnum, opcode = str(num), name
+  write_line("/* ------------------------------ */")
+  balign()
+  write_line(".L_{1}: /* {0:#04x} */".format(num, name))
+  opcode_start()
+  opcode_pre()
+  write_method()
+  opcode_end()
+  write_line("")
+  opnum, opcode = None, None
+
+generated_helpers = {}
+
+# This method generates a helper using the provided writer method.
+# The output is temporarily redirected to in-memory buffer.
+def add_helper(write_helper, name = None):
+  if name == None:
+    name = "Mterp_" + opcode + "_helper"
+  global out
+  old_out = out
+  out = StringIO()
+  helper_start(name)
+  write_helper()
+  helper_end(name)
+  out.seek(0)
+  generated_helpers[name] = out.read()
+  out = old_out
+  return name
+
+def generate(output_filename):
+  out.seek(0)
+  out.truncate()
+  write_line("/* DO NOT EDIT: This file was generated by gen-mterp.py. */")
+  header()
+  entry()
+
+  instruction_start()
+  opcodes()
+  balign()
+  instruction_end()
+
+  for name, helper in sorted(generated_helpers.items()):
+    out.write(helper)
+  helpers()
+
+  footer()
+
+  out.seek(0)
+  # Squash consecutive empty lines.
+  text = re.sub(r"(\n\n)(\n)+", r"\1", out.read())
+  with open(output_filename, 'w') as output_file:
+    output_file.write(text)
+
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
deleted file mode 100644
index a45efd9..0000000
--- a/runtime/interpreter/mterp/config_arm
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for ARMv7-A targets.
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub arm/alt_stub.S
-
-# file header and basic definitions
-import arm/header.S
-
-# arch-specific entry point to interpreter
-import arm/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub arm/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start arm
-    # (override example:) op op_sub_float_2addr arm-vfp
-    # (fallback example:) op op_sub_float_2addr FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import arm/footer.S
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
deleted file mode 100644
index 590363f..0000000
--- a/runtime/interpreter/mterp/config_arm64
+++ /dev/null
@@ -1,306 +0,0 @@
-
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for ARM64
-#
-
-handler-style computed-goto
-handler-size 128
-
-# file header and basic definitions
-import arm64/header.S
-
-# arch-specific entry point to interpreter
-import arm64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub arm64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start arm64
-    # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
-    # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm; we emit the footer before alternate
-# entry stubs, so that TBZ/TBNZ from ops can reach targets in footer
-import arm64/footer.S
-
-# source for alternate entry stub
-asm-alt-stub arm64/alt_stub.S
-
-# emit alternate entry stubs
-alt-ops
-
-# finish by closing .cfi info
-import arm64/close_cfi.S
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
deleted file mode 100644
index d6173da..0000000
--- a/runtime/interpreter/mterp/config_mips
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for MIPS_32 targets.
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub mips/alt_stub.S
-
-# file header and basic definitions
-import mips/header.S
-
-# arch-specific entry point to interpreter
-import mips/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub mips/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start mips
-    # (override example:) op op_sub_float_2addr arm-vfp
-    # (fallback example:) op op_sub_float_2addr FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import mips/footer.S
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
deleted file mode 100644
index a9bf362..0000000
--- a/runtime/interpreter/mterp/config_mips64
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for MIPS_64
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub mips64/alt_stub.S
-
-# file header and basic definitions
-import mips64/header.S
-
-# arch-specific entry point to interpreter
-import mips64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub mips64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start mips64
-    # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
-    # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import mips64/footer.S
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
deleted file mode 100644
index 2417851..0000000
--- a/runtime/interpreter/mterp/config_x86
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for X86
-#
-
-handler-style computed-goto
-handler-size 128
-
-function-type-format FUNCTION_TYPE(%s)
-function-size-format SIZE(%s,%s)
-global-name-format SYMBOL(%s)
-
-# source for alternate entry stub
-asm-alt-stub x86/alt_stub.S
-
-# file header and basic definitions
-import x86/header.S
-
-# arch-specific entry point to interpreter
-import x86/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub x86/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start x86
-    # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
-    # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import x86/footer.S
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
deleted file mode 100644
index 89fbf43..0000000
--- a/runtime/interpreter/mterp/config_x86_64
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for X86_64
-#
-
-handler-style computed-goto
-handler-size 128
-
-function-type-format FUNCTION_TYPE(%s)
-function-size-format SIZE(%s,%s)
-global-name-format SYMBOL(%s)
-
-# source for alternate entry stub
-asm-alt-stub x86_64/alt_stub.S
-
-# file header and basic definitions
-import x86_64/header.S
-
-# arch-specific entry point to interpreter
-import x86_64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub x86_64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start x86_64
-    # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
-    # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
-    # op op_nop FALLBACK
-    # op op_move FALLBACK
-    # op op_move_from16 FALLBACK
-    # op op_move_16 FALLBACK
-    # op op_move_wide FALLBACK
-    # op op_move_wide_from16 FALLBACK
-    # op op_move_wide_16 FALLBACK
-    # op op_move_object FALLBACK
-    # op op_move_object_from16 FALLBACK
-    # op op_move_object_16 FALLBACK
-    # op op_move_result FALLBACK
-    # op op_move_result_wide FALLBACK
-    # op op_move_result_object FALLBACK
-    # op op_move_exception FALLBACK
-    # op op_return_void FALLBACK
-    # op op_return FALLBACK
-    # op op_return_wide FALLBACK
-    # op op_return_object FALLBACK
-    # op op_const_4 FALLBACK
-    # op op_const_16 FALLBACK
-    # op op_const FALLBACK
-    # op op_const_high16 FALLBACK
-    # op op_const_wide_16 FALLBACK
-    # op op_const_wide_32 FALLBACK
-    # op op_const_wide FALLBACK
-    # op op_const_wide_high16 FALLBACK
-    # op op_const_string FALLBACK
-    # op op_const_string_jumbo FALLBACK
-    # op op_const_class FALLBACK
-    # op op_monitor_enter FALLBACK
-    # op op_monitor_exit FALLBACK
-    # op op_check_cast FALLBACK
-    # op op_instance_of FALLBACK
-    # op op_array_length FALLBACK
-    # op op_new_instance FALLBACK
-    # op op_new_array FALLBACK
-    # op op_filled_new_array FALLBACK
-    # op op_filled_new_array_range FALLBACK
-    # op op_fill_array_data FALLBACK
-    # op op_throw FALLBACK
-    # op op_goto FALLBACK
-    # op op_goto_16 FALLBACK
-    # op op_goto_32 FALLBACK
-    # op op_packed_switch FALLBACK
-    # op op_sparse_switch FALLBACK
-    # op op_cmpl_float FALLBACK
-    # op op_cmpg_float FALLBACK
-    # op op_cmpl_double FALLBACK
-    # op op_cmpg_double FALLBACK
-    # op op_cmp_long FALLBACK
-    # op op_if_eq FALLBACK
-    # op op_if_ne FALLBACK
-    # op op_if_lt FALLBACK
-    # op op_if_ge FALLBACK
-    # op op_if_gt FALLBACK
-    # op op_if_le FALLBACK
-    # op op_if_eqz FALLBACK
-    # op op_if_nez FALLBACK
-    # op op_if_ltz FALLBACK
-    # op op_if_gez FALLBACK
-    # op op_if_gtz FALLBACK
-    # op op_if_lez FALLBACK
-    # op op_unused_3e FALLBACK
-    # op op_unused_3f FALLBACK
-    # op op_unused_40 FALLBACK
-    # op op_unused_41 FALLBACK
-    # op op_unused_42 FALLBACK
-    # op op_unused_43 FALLBACK
-    # op op_aget FALLBACK
-    # op op_aget_wide FALLBACK
-    # op op_aget_object FALLBACK
-    # op op_aget_boolean FALLBACK
-    # op op_aget_byte FALLBACK
-    # op op_aget_char FALLBACK
-    # op op_aget_short FALLBACK
-    # op op_aput FALLBACK
-    # op op_aput_wide FALLBACK
-    # op op_aput_object FALLBACK
-    # op op_aput_boolean FALLBACK
-    # op op_aput_byte FALLBACK
-    # op op_aput_char FALLBACK
-    # op op_aput_short FALLBACK
-    # op op_iget FALLBACK
-    # op op_iget_wide FALLBACK
-    # op op_iget_object FALLBACK
-    # op op_iget_boolean FALLBACK
-    # op op_iget_byte FALLBACK
-    # op op_iget_char FALLBACK
-    # op op_iget_short FALLBACK
-    # op op_iput FALLBACK
-    # op op_iput_wide FALLBACK
-    # op op_iput_object FALLBACK
-    # op op_iput_boolean FALLBACK
-    # op op_iput_byte FALLBACK
-    # op op_iput_char FALLBACK
-    # op op_iput_short FALLBACK
-    # op op_sget FALLBACK
-    # op op_sget_wide FALLBACK
-    # op op_sget_object FALLBACK
-    # op op_sget_boolean FALLBACK
-    # op op_sget_byte FALLBACK
-    # op op_sget_char FALLBACK
-    # op op_sget_short FALLBACK
-    # op op_sput FALLBACK
-    # op op_sput_wide FALLBACK
-    # op op_sput_object FALLBACK
-    # op op_sput_boolean FALLBACK
-    # op op_sput_byte FALLBACK
-    # op op_sput_char FALLBACK
-    # op op_sput_short FALLBACK
-    # op op_invoke_virtual FALLBACK
-    # op op_invoke_super FALLBACK
-    # op op_invoke_direct FALLBACK
-    # op op_invoke_static FALLBACK
-    # op op_invoke_interface FALLBACK
-    # op op_return_void_no_barrier FALLBACK
-    # op op_invoke_virtual_range FALLBACK
-    # op op_invoke_super_range FALLBACK
-    # op op_invoke_direct_range FALLBACK
-    # op op_invoke_static_range FALLBACK
-    # op op_invoke_interface_range FALLBACK
-    # op op_unused_79 FALLBACK
-    # op op_unused_7a FALLBACK
-    # op op_neg_int FALLBACK
-    # op op_not_int FALLBACK
-    # op op_neg_long FALLBACK
-    # op op_not_long FALLBACK
-    # op op_neg_float FALLBACK
-    # op op_neg_double FALLBACK
-    # op op_int_to_long FALLBACK
-    # op op_int_to_float FALLBACK
-    # op op_int_to_double FALLBACK
-    # op op_long_to_int FALLBACK
-    # op op_long_to_float FALLBACK
-    # op op_long_to_double FALLBACK
-    # op op_float_to_int FALLBACK
-    # op op_float_to_long FALLBACK
-    # op op_float_to_double FALLBACK
-    # op op_double_to_int FALLBACK
-    # op op_double_to_long FALLBACK
-    # op op_double_to_float FALLBACK
-    # op op_int_to_byte FALLBACK
-    # op op_int_to_char FALLBACK
-    # op op_int_to_short FALLBACK
-    # op op_add_int FALLBACK
-    # op op_sub_int FALLBACK
-    # op op_mul_int FALLBACK
-    # op op_div_int FALLBACK
-    # op op_rem_int FALLBACK
-    # op op_and_int FALLBACK
-    # op op_or_int FALLBACK
-    # op op_xor_int FALLBACK
-    # op op_shl_int FALLBACK
-    # op op_shr_int FALLBACK
-    # op op_ushr_int FALLBACK
-    # op op_add_long FALLBACK
-    # op op_sub_long FALLBACK
-    # op op_mul_long FALLBACK
-    # op op_div_long FALLBACK
-    # op op_rem_long FALLBACK
-    # op op_and_long FALLBACK
-    # op op_or_long FALLBACK
-    # op op_xor_long FALLBACK
-    # op op_shl_long FALLBACK
-    # op op_shr_long FALLBACK
-    # op op_ushr_long FALLBACK
-    # op op_add_float FALLBACK
-    # op op_sub_float FALLBACK
-    # op op_mul_float FALLBACK
-    # op op_div_float FALLBACK
-    # op op_rem_float FALLBACK
-    # op op_add_double FALLBACK
-    # op op_sub_double FALLBACK
-    # op op_mul_double FALLBACK
-    # op op_div_double FALLBACK
-    # op op_rem_double FALLBACK
-    # op op_add_int_2addr FALLBACK
-    # op op_sub_int_2addr FALLBACK
-    # op op_mul_int_2addr FALLBACK
-    # op op_div_int_2addr FALLBACK
-    # op op_rem_int_2addr FALLBACK
-    # op op_and_int_2addr FALLBACK
-    # op op_or_int_2addr FALLBACK
-    # op op_xor_int_2addr FALLBACK
-    # op op_shl_int_2addr FALLBACK
-    # op op_shr_int_2addr FALLBACK
-    # op op_ushr_int_2addr FALLBACK
-    # op op_add_long_2addr FALLBACK
-    # op op_sub_long_2addr FALLBACK
-    # op op_mul_long_2addr FALLBACK
-    # op op_div_long_2addr FALLBACK
-    # op op_rem_long_2addr FALLBACK
-    # op op_and_long_2addr FALLBACK
-    # op op_or_long_2addr FALLBACK
-    # op op_xor_long_2addr FALLBACK
-    # op op_shl_long_2addr FALLBACK
-    # op op_shr_long_2addr FALLBACK
-    # op op_ushr_long_2addr FALLBACK
-    # op op_add_float_2addr FALLBACK
-    # op op_sub_float_2addr FALLBACK
-    # op op_mul_float_2addr FALLBACK
-    # op op_div_float_2addr FALLBACK
-    # op op_rem_float_2addr FALLBACK
-    # op op_add_double_2addr FALLBACK
-    # op op_sub_double_2addr FALLBACK
-    # op op_mul_double_2addr FALLBACK
-    # op op_div_double_2addr FALLBACK
-    # op op_rem_double_2addr FALLBACK
-    # op op_add_int_lit16 FALLBACK
-    # op op_rsub_int FALLBACK
-    # op op_mul_int_lit16 FALLBACK
-    # op op_div_int_lit16 FALLBACK
-    # op op_rem_int_lit16 FALLBACK
-    # op op_and_int_lit16 FALLBACK
-    # op op_or_int_lit16 FALLBACK
-    # op op_xor_int_lit16 FALLBACK
-    # op op_add_int_lit8 FALLBACK
-    # op op_rsub_int_lit8 FALLBACK
-    # op op_mul_int_lit8 FALLBACK
-    # op op_div_int_lit8 FALLBACK
-    # op op_rem_int_lit8 FALLBACK
-    # op op_and_int_lit8 FALLBACK
-    # op op_or_int_lit8 FALLBACK
-    # op op_xor_int_lit8 FALLBACK
-    # op op_shl_int_lit8 FALLBACK
-    # op op_shr_int_lit8 FALLBACK
-    # op op_ushr_int_lit8 FALLBACK
-    # op op_iget_quick FALLBACK
-    # op op_iget_wide_quick FALLBACK
-    # op op_iget_object_quick FALLBACK
-    # op op_iput_quick FALLBACK
-    # op op_iput_wide_quick FALLBACK
-    # op op_iput_object_quick FALLBACK
-    # op op_invoke_virtual_quick FALLBACK
-    # op op_invoke_virtual_range_quick FALLBACK
-    # op op_iput_boolean_quick FALLBACK
-    # op op_iput_byte_quick FALLBACK
-    # op op_iput_char_quick FALLBACK
-    # op op_iput_short_quick FALLBACK
-    # op op_iget_boolean_quick FALLBACK
-    # op op_iget_byte_quick FALLBACK
-    # op op_iget_char_quick FALLBACK
-    # op op_iget_short_quick FALLBACK
-    # op op_unused_f3 FALLBACK
-    # op op_unused_f4 FALLBACK
-    # op op_unused_f5 FALLBACK
-    # op op_unused_f6 FALLBACK
-    # op op_unused_f7 FALLBACK
-    # op op_unused_f8 FALLBACK
-    # op op_unused_f9 FALLBACK
-    # op op_invoke_polymorphic FALLBACK
-    # op op_invoke_polymorphic_range FALLBACK
-    # op op_invoke_custom FALLBACK
-    # op op_invoke_custom_range FALLBACK
-    # op op_const_method_handle FALLBACK
-    # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import x86_64/footer.S
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index 75c5174..5d25955 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -14,605 +14,85 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#
-# Using instructions from an architecture-specific config file, generate C
-# and assembly source files for the Dalvik interpreter.
-#
+import sys, re, os
+from cStringIO import StringIO
 
-import sys, string, re, time
-from string import Template
+SCRIPT_DIR = os.path.dirname(sys.argv[0])
+# This file is included verbatim at the start of the in-memory python script.
+SCRIPT_SETUP_CODE = SCRIPT_DIR + "/common/gen_setup.py"
+INTERP_DEFS_FILE = SCRIPT_DIR + "/../../../libdexfile/dex/dex_instruction_list.h"
+NUM_PACKED_OPCODES = 256
 
-interp_defs_file = "../../../libdexfile/dex/dex_instruction_list.h" # need opcode list
-kNumPackedOpcodes = 256
-
-splitops = False
-verbose = False
-handler_size_bits = -1000
-handler_size_bytes = -1000
-in_op_start = 0             # 0=not started, 1=started, 2=ended
-in_alt_op_start = 0         # 0=not started, 1=started, 2=ended
-default_op_dir = None
-default_alt_stub = None
-opcode_locations = {}
-alt_opcode_locations = {}
-asm_stub_text = []
-fallback_stub_text = []
-label_prefix = ".L"         # use ".L" to hide labels from gdb
-alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
-style = None                # interpreter style
-generate_alt_table = False
-function_type_format = ".type   %s, %%function"
-function_size_format = ".size   %s, .-%s"
-global_name_format = "%s"
-
-# Exception class.
-class DataParseError(SyntaxError):
-    "Failure when parsing data file"
-
-#
-# Set any omnipresent substitution values.
-#
-def getGlobalSubDict():
-    return { "handler_size_bits":handler_size_bits,
-             "handler_size_bytes":handler_size_bytes }
-
-#
-# Parse arch config file --
-# Set interpreter style.
-#
-def setHandlerStyle(tokens):
-    global style
-    if len(tokens) != 2:
-        raise DataParseError("handler-style requires one argument")
-    style = tokens[1]
-    if style != "computed-goto":
-        raise DataParseError("handler-style (%s) invalid" % style)
-
-#
-# Parse arch config file --
-# Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
-# log2(handler_size_bytes).  Throws an exception if "bytes" is not 0 or
-# a power of two.
-#
-def setHandlerSize(tokens):
-    global handler_size_bits, handler_size_bytes
-    if style != "computed-goto":
-        print "Warning: handler-size valid only for computed-goto interpreters"
-    if len(tokens) != 2:
-        raise DataParseError("handler-size requires one argument")
-    if handler_size_bits != -1000:
-        raise DataParseError("handler-size may only be set once")
-
-    # compute log2(n), and make sure n is 0 or a power of 2
-    handler_size_bytes = bytes = int(tokens[1])
-    bits = -1
-    while bytes > 0:
-        bytes //= 2     # halve with truncating division
-        bits += 1
-
-    if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
-        raise DataParseError("handler-size (%d) must be power of 2" \
-                % orig_bytes)
-    handler_size_bits = bits
-
-#
-# Parse arch config file --
-# Copy a file in to asm output file.
-#
-def importFile(tokens):
-    if len(tokens) != 2:
-        raise DataParseError("import requires one argument")
-    source = tokens[1]
-    if source.endswith(".S"):
-        appendSourceFile(tokens[1], getGlobalSubDict(), asm_fp, None)
-    else:
-        raise DataParseError("don't know how to import %s (expecting .cpp/.S)"
-                % source)
-
-#
-# Parse arch config file --
-# Copy a file in to the C or asm output file.
-#
-def setAsmStub(tokens):
-    global asm_stub_text
-    if len(tokens) != 2:
-        raise DataParseError("import requires one argument")
-    try:
-        stub_fp = open(tokens[1])
-        asm_stub_text = stub_fp.readlines()
-    except IOError, err:
-        stub_fp.close()
-        raise DataParseError("unable to load asm-stub: %s" % str(err))
-    stub_fp.close()
-
-#
-# Parse arch config file --
-# Copy a file in to the C or asm output file.
-#
-def setFallbackStub(tokens):
-    global fallback_stub_text
-    if len(tokens) != 2:
-        raise DataParseError("import requires one argument")
-    try:
-        stub_fp = open(tokens[1])
-        fallback_stub_text = stub_fp.readlines()
-    except IOError, err:
-        stub_fp.close()
-        raise DataParseError("unable to load fallback-stub: %s" % str(err))
-    stub_fp.close()
-#
-# Parse arch config file --
-# Record location of default alt stub
-#
-def setAsmAltStub(tokens):
-    global default_alt_stub, generate_alt_table
-    if len(tokens) != 2:
-        raise DataParseError("import requires one argument")
-    default_alt_stub = tokens[1]
-    generate_alt_table = True
-#
-# Change the default function type format
-#
-def setFunctionTypeFormat(tokens):
-    global function_type_format
-    function_type_format = tokens[1]
-#
-# Change the default function size format
-#
-def setFunctionSizeFormat(tokens):
-    global function_size_format
-    function_size_format = tokens[1]
-#
-# Change the global name format
-#
-def setGlobalNameFormat(tokens):
-    global global_name_format
-    global_name_format = tokens[1]
-#
-# Parse arch config file --
-# Start of opcode list.
-#
-def opStart(tokens):
-    global in_op_start
-    global default_op_dir
-    if len(tokens) != 2:
-        raise DataParseError("opStart takes a directory name argument")
-    if in_op_start != 0:
-        raise DataParseError("opStart can only be specified once")
-    default_op_dir = tokens[1]
-    in_op_start = 1
-
-#
-# Parse arch config file --
-# Set location of a single alt opcode's source file.
-#
-def altEntry(tokens):
-    global generate_alt_table
-    if len(tokens) != 3:
-        raise DataParseError("alt requires exactly two arguments")
-    if in_op_start != 1:
-        raise DataParseError("alt statements must be between opStart/opEnd")
-    try:
-        index = opcodes.index(tokens[1])
-    except ValueError:
-        raise DataParseError("unknown opcode %s" % tokens[1])
-    if alt_opcode_locations.has_key(tokens[1]):
-        print "Note: alt overrides earlier %s (%s -> %s)" \
-                % (tokens[1], alt_opcode_locations[tokens[1]], tokens[2])
-    alt_opcode_locations[tokens[1]] = tokens[2]
-    generate_alt_table = True
-
-#
-# Parse arch config file --
-# Set location of a single opcode's source file.
-#
-def opEntry(tokens):
-    #global opcode_locations
-    if len(tokens) != 3:
-        raise DataParseError("op requires exactly two arguments")
-    if in_op_start != 1:
-        raise DataParseError("op statements must be between opStart/opEnd")
-    try:
-        index = opcodes.index(tokens[1])
-    except ValueError:
-        raise DataParseError("unknown opcode %s" % tokens[1])
-    if opcode_locations.has_key(tokens[1]):
-        print "Note: op overrides earlier %s (%s -> %s)" \
-                % (tokens[1], opcode_locations[tokens[1]], tokens[2])
-    opcode_locations[tokens[1]] = tokens[2]
-
-#
-# Parse arch config file --
-# End of opcode list; emit instruction blocks.
-#
-def opEnd(tokens):
-    global in_op_start
-    if len(tokens) != 1:
-        raise DataParseError("opEnd takes no arguments")
-    if in_op_start != 1:
-        raise DataParseError("opEnd must follow opStart, and only appear once")
-    in_op_start = 2
-
-    loadAndEmitOpcodes()
-    if splitops == False:
-        if generate_alt_table:
-            loadAndEmitAltOpcodes()
-
-def genaltop(tokens):
-    if in_op_start != 2:
-       raise DataParseError("alt-op can be specified only after op-end")
-    if len(tokens) != 1:
-        raise DataParseError("opEnd takes no arguments")
-    if generate_alt_table:
-        loadAndEmitAltOpcodes()
-
-#
 # Extract an ordered list of instructions from the VM sources.  We use the
-# "goto table" definition macro, which has exactly kNumPackedOpcodes
-# entries.
-#
+# "goto table" definition macro, which has exactly NUM_PACKED_OPCODES entries.
 def getOpcodeList():
-    opcodes = []
-    opcode_fp = open(interp_defs_file)
-    opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
-    for line in opcode_fp:
-        match = opcode_re.match(line)
-        if not match:
-            continue
-        opcodes.append("op_" + match.group(2).lower())
-    opcode_fp.close()
+  opcodes = []
+  opcode_fp = open(INTERP_DEFS_FILE)
+  opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
+  for line in opcode_fp:
+    match = opcode_re.match(line)
+    if not match:
+      continue
+    opcodes.append("op_" + match.group(2).lower())
+  opcode_fp.close()
 
-    if len(opcodes) != kNumPackedOpcodes:
-        print "ERROR: found %d opcodes in Interp.h (expected %d)" \
-                % (len(opcodes), kNumPackedOpcodes)
-        raise SyntaxError, "bad opcode count"
-    return opcodes
+  if len(opcodes) != NUM_PACKED_OPCODES:
+    print "ERROR: found %d opcodes in Interp.h (expected %d)" \
+        % (len(opcodes), NUM_PACKED_OPCODES)
+    raise SyntaxError, "bad opcode count"
+  return opcodes
 
-def emitAlign():
-    if style == "computed-goto":
-        asm_fp.write("    .balign %d\n" % handler_size_bytes)
+indent_re = re.compile(r"^%( *)")
 
-#
-# Load and emit opcodes for all kNumPackedOpcodes instructions.
-#
-def loadAndEmitOpcodes():
-    sister_list = []
-    assert len(opcodes) == kNumPackedOpcodes
-    need_dummy_start = False
+# Finds variable references in text: $foo or ${foo}
+escape_re = re.compile(r'''
+  (?<!\$)        # Look-back: must not be preceded by another $.
+  \$
+  (\{)?          # May be enclosed by { } pair.
+  (?P<name>\w+)  # Save the symbol in named group.
+  (?(1)\})       # Expect } if and only if { was present.
+''', re.VERBOSE)
 
-    loadAndEmitGenericAsm("instruction_start")
+def generate_script(output_filename, input_filenames):
+  # Create new python script and write the initial setup code.
+  script = StringIO()  # File-like in-memory buffer.
+  script.write("# DO NOT EDIT: This file was generated by gen-mterp.py.\n")
+  script.write(open(SCRIPT_SETUP_CODE, "r").read())
+  script.write("def opcodes():\n")
+  for i, opcode in enumerate(getOpcodeList()):
+    script.write('  write_opcode({0}, "{1}", {1})\n'.format(i, opcode))
 
-    for i in xrange(kNumPackedOpcodes):
-        op = opcodes[i]
+  # Read all template files and translate them into python code.
+  for input_filename in sorted(input_filenames):
+    lines = open(input_filename, "r").readlines()
+    indent = ""
+    for line in lines:
+      line = line.rstrip()
+      if line.startswith("%"):
+        script.write(line.lstrip("%") + "\n")
+        indent = indent_re.match(line).group(1)
+        if line.endswith(":"):
+          indent += "  "
+      else:
+        line = escape_re.sub(r"''' + \g<name> + '''", line)
+        line = line.replace("\\", "\\\\")
+        line = line.replace("$$", "$")
+        script.write(indent + "write_line('''" + line + "''')\n")
+    script.write("\n")
 
-        if opcode_locations.has_key(op):
-            location = opcode_locations[op]
-        else:
-            location = default_op_dir
+  script.write("generate('''" + output_filename + "''')\n")
+  script.seek(0)
+  return script.read()
 
-        if location == "FALLBACK":
-            emitFallback(i)
-        else:
-            loadAndEmitAsm(location, i, sister_list)
+if len(sys.argv) <= 3:
+  print("Usage: output_file input_file(s)")
+  sys.exit(1)
 
-    # For a 100% C implementation, there are no asm handlers or stubs.  We
-    # need to have the MterpAsmInstructionStart label point at op_nop, and it's
-    # too annoying to try to slide it in after the alignment psuedo-op, so
-    # we take the low road and just emit a dummy op_nop here.
-    if need_dummy_start:
-        emitAlign()
-        asm_fp.write(label_prefix + "_op_nop:   /* dummy */\n");
-
-    emitAlign()
-
-    loadAndEmitGenericAsm("instruction_end")
-
-    if style == "computed-goto":
-        emitSectionComment("Sister implementations", asm_fp)
-        loadAndEmitGenericAsm("instruction_start_sister")
-        asm_fp.writelines(sister_list)
-        loadAndEmitGenericAsm("instruction_end_sister")
-
-#
-# Load an alternate entry stub
-#
-def loadAndEmitAltStub(source, opindex):
-    op = opcodes[opindex]
-    if verbose:
-        print " alt emit %s --> stub" % source
-    dict = getGlobalSubDict()
-    dict.update({ "opcode":op, "opnum":opindex })
-
-    emitAsmHeader(asm_fp, dict, alt_label_prefix)
-    appendSourceFile(source, dict, asm_fp, None)
-
-#
-# Load and emit alternate opcodes for all kNumPackedOpcodes instructions.
-#
-def loadAndEmitAltOpcodes():
-    assert len(opcodes) == kNumPackedOpcodes
-    start_label = global_name_format % "artMterpAsmAltInstructionStart"
-    end_label = global_name_format % "artMterpAsmAltInstructionEnd"
-
-    loadAndEmitGenericAsm("instruction_start_alt")
-
-    for i in xrange(kNumPackedOpcodes):
-        op = opcodes[i]
-        if alt_opcode_locations.has_key(op):
-            source = "%s/alt_%s.S" % (alt_opcode_locations[op], op)
-        else:
-            source = default_alt_stub
-        loadAndEmitAltStub(source, i)
-
-    emitAlign()
-
-    loadAndEmitGenericAsm("instruction_end_alt")
-
-#
-# Load an assembly fragment and emit it.
-#
-def loadAndEmitAsm(location, opindex, sister_list):
-    op = opcodes[opindex]
-    source = "%s/%s.S" % (location, op)
-    dict = getGlobalSubDict()
-    dict.update({ "opcode":op, "opnum":opindex })
-    if verbose:
-        print " emit %s --> asm" % source
-
-    emitAsmHeader(asm_fp, dict, label_prefix)
-    appendSourceFile(source, dict, asm_fp, sister_list)
-
-#
-# Load a non-handler assembly fragment and emit it.
-#
-def loadAndEmitGenericAsm(name):
-    source = "%s/%s.S" % (default_op_dir, name)
-    dict = getGlobalSubDict()
-    appendSourceFile(source, dict, asm_fp, None)
-
-#
-# Emit fallback fragment
-#
-def emitFallback(opindex):
-    op = opcodes[opindex]
-    dict = getGlobalSubDict()
-    dict.update({ "opcode":op, "opnum":opindex })
-    emitAsmHeader(asm_fp, dict, label_prefix)
-    for line in fallback_stub_text:
-        asm_fp.write(line)
-    asm_fp.write("\n")
-
-#
-# Output the alignment directive and label for an assembly piece.
-#
-def emitAsmHeader(outfp, dict, prefix):
-    outfp.write("/* ------------------------------ */\n")
-    # The alignment directive ensures that the handler occupies
-    # at least the correct amount of space.  We don't try to deal
-    # with overflow here.
-    emitAlign()
-    # Emit a label so that gdb will say the right thing.  We prepend an
-    # underscore so the symbol name doesn't clash with the Opcode enum.
-    outfp.write(prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
-
-#
-# Output a generic instruction stub that updates the "glue" struct and
-# calls the C implementation.
-#
-def emitAsmStub(outfp, dict):
-    emitAsmHeader(outfp, dict, label_prefix)
-    for line in asm_stub_text:
-        templ = Template(line)
-        outfp.write(templ.substitute(dict))
-
-#
-# Append the file specified by "source" to the open "outfp".  Each line will
-# be template-replaced using the substitution dictionary "dict".
-#
-# If the first line of the file starts with "%" it is taken as a directive.
-# A "%include" line contains a filename and, optionally, a Python-style
-# dictionary declaration with substitution strings.  (This is implemented
-# with recursion.)
-#
-# If "sister_list" is provided, and we find a line that contains only "&",
-# all subsequent lines from the file will be appended to sister_list instead
-# of copied to the output.
-#
-# This may modify "dict".
-#
-def appendSourceFile(source, dict, outfp, sister_list):
-    outfp.write("/* File: %s */\n" % source)
-    infp = open(source, "r")
-    in_sister = False
-    for line in infp:
-        if line.startswith("%include"):
-            # Parse the "include" line
-            tokens = line.strip().split(' ', 2)
-            if len(tokens) < 2:
-                raise DataParseError("malformed %%include in %s" % source)
-
-            alt_source = tokens[1].strip("\"")
-            if alt_source == source:
-                raise DataParseError("self-referential %%include in %s"
-                        % source)
-
-            new_dict = dict.copy()
-            if len(tokens) == 3:
-                new_dict.update(eval(tokens[2]))
-            #print " including src=%s dict=%s" % (alt_source, new_dict)
-            appendSourceFile(alt_source, new_dict, outfp, sister_list)
-            continue
-
-        elif line.startswith("%default"):
-            # copy keywords into dictionary
-            tokens = line.strip().split(' ', 1)
-            if len(tokens) < 2:
-                raise DataParseError("malformed %%default in %s" % source)
-            defaultValues = eval(tokens[1])
-            for entry in defaultValues:
-                dict.setdefault(entry, defaultValues[entry])
-            continue
-
-        elif line.startswith("%break") and sister_list != None:
-            # allow more than one %break, ignoring all following the first
-            if style == "computed-goto" and not in_sister:
-                in_sister = True
-                sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
-            continue
-
-        # perform keyword substitution if a dictionary was provided
-        if dict != None:
-            templ = Template(line)
-            try:
-                subline = templ.substitute(dict)
-            except KeyError, err:
-                raise DataParseError("keyword substitution failed in %s: %s"
-                        % (source, str(err)))
-            except:
-                print "ERROR: substitution failed: " + line
-                raise
-        else:
-            subline = line
-
-        # write output to appropriate file
-        if in_sister:
-            sister_list.append(subline)
-        else:
-            outfp.write(subline)
-    outfp.write("\n")
-    infp.close()
-
-#
-# Emit a C-style section header comment.
-#
-def emitSectionComment(str, fp):
-    equals = "========================================" \
-             "==================================="
-
-    fp.write("\n/*\n * %s\n *  %s\n * %s\n */\n" %
-        (equals, str, equals))
-
-
-#
-# ===========================================================================
-# "main" code
-#
-
-#
-# Check args.
-#
-if len(sys.argv) != 3:
-    print "Usage: %s target-arch output-dir" % sys.argv[0]
-    sys.exit(2)
-
-target_arch = sys.argv[1]
-output_dir = sys.argv[2]
-
-#
-# Extract opcode list.
-#
-opcodes = getOpcodeList()
-#for op in opcodes:
-#    print "  %s" % op
-
-#
-# Open config file.
-#
-try:
-    config_fp = open("config_%s" % target_arch)
-except:
-    print "Unable to open config file 'config_%s'" % target_arch
-    sys.exit(1)
-
-#
-# Open and prepare output files.
-#
-try:
-    asm_fp = open("%s/mterp_%s.S" % (output_dir, target_arch), "w")
-except:
-    print "Unable to open output files"
-    print "Make sure directory '%s' exists and existing files are writable" \
-            % output_dir
-    # Ideally we'd remove the files to avoid confusing "make", but if they
-    # failed to open we probably won't be able to remove them either.
-    sys.exit(1)
-
-print "Generating %s" % (asm_fp.name)
-
-file_header = """/*
- * This file was generated automatically by gen-mterp.py for '%s'.
- *
- * --> DO NOT EDIT <--
- */
-
-""" % (target_arch)
-
-asm_fp.write(file_header)
-
-#
-# Process the config file.
-#
-failed = False
-try:
-    for line in config_fp:
-        line = line.strip()         # remove CRLF, leading spaces
-        tokens = line.split(' ')    # tokenize
-        #print "%d: %s" % (len(tokens), tokens)
-        if len(tokens[0]) == 0:
-            #print "  blank"
-            pass
-        elif tokens[0][0] == '#':
-            #print "  comment"
-            pass
-        else:
-            if tokens[0] == "handler-size":
-                setHandlerSize(tokens)
-            elif tokens[0] == "import":
-                importFile(tokens)
-            elif tokens[0] == "asm-stub":
-                setAsmStub(tokens)
-            elif tokens[0] == "asm-alt-stub":
-                setAsmAltStub(tokens)
-            elif tokens[0] == "op-start":
-                opStart(tokens)
-            elif tokens[0] == "op-end":
-                opEnd(tokens)
-            elif tokens[0] == "alt":
-                altEntry(tokens)
-            elif tokens[0] == "op":
-                opEntry(tokens)
-            elif tokens[0] == "handler-style":
-                setHandlerStyle(tokens)
-            elif tokens[0] == "alt-ops":
-                genaltop(tokens)
-            elif tokens[0] == "split-ops":
-                splitops = True
-            elif tokens[0] == "fallback-stub":
-               setFallbackStub(tokens)
-            elif tokens[0] == "function-type-format":
-               setFunctionTypeFormat(tokens)
-            elif tokens[0] == "function-size-format":
-               setFunctionSizeFormat(tokens)
-            elif tokens[0] == "global-name-format":
-               setGlobalNameFormat(tokens)
-            else:
-                raise DataParseError, "unrecognized command '%s'" % tokens[0]
-            if style == None:
-                print "tokens[0] = %s" % tokens[0]
-                raise DataParseError, "handler-style must be first command"
-except DataParseError, err:
-    print "Failed: " + str(err)
-    # TODO: remove output files so "make" doesn't get confused
-    failed = True
-    asm_fp.close()
-    asm_fp = None
-
-config_fp.close()
-
-#
-# Done!
-#
-if asm_fp:
-    asm_fp.close()
-
-sys.exit(failed)
+# Generate the script and execute it.
+output_filename = sys.argv[1]
+input_filenames = sys.argv[2:]
+script_filename = output_filename + ".py"
+script = generate_script(output_filename, input_filenames)
+with open(script_filename, "w") as script_file:
+  script_file.write(script)  # Write to disk for debugging.
+exec(compile(script, script_filename, mode='exec'))
diff --git a/runtime/interpreter/mterp/mips/alt_stub.S b/runtime/interpreter/mterp/mips/alt_stub.S
deleted file mode 100644
index de13313..0000000
--- a/runtime/interpreter/mterp/mips/alt_stub.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (${opnum} * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
diff --git a/runtime/interpreter/mterp/mips/arithmetic.S b/runtime/interpreter/mterp/mips/arithmetic.S
new file mode 100644
index 0000000..9ae10f2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/arithmetic.S
@@ -0,0 +1,803 @@
+%def binop(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+
+%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
+
+%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    .if $chkzero
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
+
+%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+
+%def binopWide(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register pair other than a0-a1, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
+
+%def binopWide2addr(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register pair other than a0-a1, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- $result0/$result1
+
+%def unop(preinstr="", result0="a0", instr=""):
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result0 = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result0, t0, t1)        #  vA <- result0
+
+%def unopNarrower(load="LOAD64_F(fa0, fa0f, a3)", instr=""):
+    /*
+     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
+     *
+     * For: double-to-float
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    $load
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
+
+%def unopWide(preinstr="", result0="a0", result1="a1", instr=""):
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result0/result1 = op a0/a1".
+     * This could be MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double,
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vB/vB+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
+
+%def unopWider(preinstr="", result0="a0", result1="a1", instr=""):
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result0/result1 = op a0".
+     *
+     * For: int-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
+
+%def op_add_int():
+%  binop(instr="addu a0, a0, a1")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="addu a0, a0, a1")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="addu a0, a0, a1")
+
+%def op_add_int_lit8():
+%  binopLit8(instr="addu a0, a0, a1")
+
+%def op_add_long():
+/*
+ *  The compiler generates the following sequence for
+ *  [v1 v0] =  [a1 a0] + [a3 a2];
+ *    addu v0,a2,a0
+ *    addu a1,a3,a1
+ *    sltu v1,v0,a2
+ *    addu v1,v1,a1
+ */
+%  binopWide(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
+
+%def op_add_long_2addr():
+/*
+ * See op_add_long.S for details
+ */
+%  binopWide2addr(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
+
+%def op_and_int():
+%  binop(instr="and a0, a0, a1")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="and a0, a0, a1")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="and a0, a0, a1")
+
+%def op_and_int_lit8():
+%  binopLit8(instr="and a0, a0, a1")
+
+%def op_and_long():
+%  binopWide(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
+
+%def op_and_long_2addr():
+%  binopWide2addr(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
+
+%def op_cmp_long():
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code by the following observation
+     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+ *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+ *    subu  v0, t1, t0              # v0= -1:1:0 for [ < > = ]
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    slt       t0, a1, a3                   #  compare hi
+    sgt       t1, a1, a3
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
+    bnez      v0, .L${opcode}_finish
+    # at this point x.hi==y.hi
+    sltu      t0, a0, a2                   #  compare lo
+    sgtu      t1, a0, a2
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
+%def op_div_int():
+#ifdef MIPS32REVGE6
+%  binop(instr="div a0, a0, a1", chkzero="1")
+#else
+%  binop(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_2addr():
+#ifdef MIPS32REVGE6
+%  binop2addr(instr="div a0, a0, a1", chkzero="1")
+#else
+%  binop2addr(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_lit16():
+#ifdef MIPS32REVGE6
+%  binopLit16(instr="div a0, a0, a1", chkzero="1")
+#else
+%  binopLit16(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_lit8():
+#ifdef MIPS32REVGE6
+%  binopLit8(instr="div a0, a0, a1", chkzero="1")
+#else
+%  binopLit8(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_long():
+%  binopWide(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
+
+%def op_div_long_2addr():
+%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
+
+%def op_int_to_byte():
+%  unop(instr="SEB(a0, a0)")
+
+%def op_int_to_char():
+%  unop(preinstr="", instr="and a0, 0xffff")
+
+%def op_int_to_long():
+%  unopWider(instr="sra a1, a0, 31")
+
+%def op_int_to_short():
+%  unop(instr="SEH(a0, a0)")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+%  binop(instr="mul a0, a0, a1")
+
+%def op_mul_int_2addr():
+%  binop2addr(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit16():
+%  binopLit16(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit8():
+%  binopLit8(instr="mul a0, a0, a1")
+
+%def op_mul_long():
+    /*
+     * Signed 64-bit integer multiply.
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       t0, a0, 255                  #  t0 <- BB
+    srl       t1, a0, 8                    #  t1 <- CC
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
+    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
+
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t0, a2, a1                   #  t0= a2a1
+    addu      v1, v1, t1                   #  v1+= hi(a2a0)
+    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
+
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    b         .L${opcode}_finish
+%def op_mul_long_helper_code():
+
+.Lop_mul_long_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
+
+%def op_mul_long_2addr():
+    /*
+     * See op_mul_long.S for more details
+     */
+    /* mul-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  vAA.low / high
+
+    GET_OPB(t1)                            #  t1 <- B
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
+    LOAD64(a2, a3, t1)                     #  vBB.low / high
+
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t2, a2, a1                   #  t2= a2a1
+    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
+    addu      v1, v1, t2                   #  v1= v1 + a2a1;
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
+
+%def op_neg_int():
+%  unop(instr="negu a0, a0")
+
+%def op_neg_long():
+%  unopWide(result0="v0", result1="v1", preinstr="negu v0, a0", instr="negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0")
+
+%def op_not_int():
+%  unop(instr="not a0, a0")
+
+%def op_not_long():
+%  unopWide(preinstr="not a0, a0", instr="not a1, a1")
+
+%def op_or_int():
+%  binop(instr="or a0, a0, a1")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="or a0, a0, a1")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="or a0, a0, a1")
+
+%def op_or_int_lit8():
+%  binopLit8(instr="or a0, a0, a1")
+
+%def op_or_long():
+%  binopWide(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
+
+%def op_or_long_2addr():
+%  binopWide2addr(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
+
+%def op_rem_int():
+#ifdef MIPS32REVGE6
+%  binop(instr="mod a0, a0, a1", chkzero="1")
+#else
+%  binop(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_2addr():
+#ifdef MIPS32REVGE6
+%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
+#else
+%  binop2addr(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_lit16():
+#ifdef MIPS32REVGE6
+%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
+#else
+%  binopLit16(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_lit8():
+#ifdef MIPS32REVGE6
+%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
+#else
+%  binopLit8(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_long():
+%  binopWide(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
+
+%def op_rem_long_2addr():
+%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%  binopLit16(instr="subu a0, a1, a0")
+
+%def op_rsub_int_lit8():
+%  binopLit8(instr="subu a0, a1, a0")
+
+%def op_shl_int():
+%  binop(instr="sll a0, a0, a1")
+
+%def op_shl_int_2addr():
+%  binop2addr(instr="sll a0, a0, a1")
+
+%def op_shl_int_lit8():
+%  binopLit8(instr="sll a0, a0, a1")
+
+%def op_shl_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t2)                            #  t2 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .L${opcode}_finish
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
+%def op_shl_long_helper_code():
+
+.Lop_shl_long_finish:
+    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
+
+%def op_shl_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .L${opcode}_finish
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
+%def op_shl_long_2addr_helper_code():
+
+.Lop_shl_long_2addr_finish:
+    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
+
+%def op_shr_int():
+%  binop(instr="sra a0, a0, a1")
+
+%def op_shr_int_2addr():
+%  binop2addr(instr="sra a0, a0, a1")
+
+%def op_shr_int_lit8():
+%  binopLit8(instr="sra a0, a0, a1")
+
+%def op_shr_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t3)                            #  t3 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .L${opcode}_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
+%def op_shr_long_helper_code():
+
+.Lop_shr_long_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/vAA+1 <- rlo/rhi
+
+%def op_shr_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .L${opcode}_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
+%def op_shr_long_2addr_helper_code():
+
+.Lop_shr_long_2addr_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
+
+%def op_sub_int():
+%  binop(instr="subu a0, a0, a1")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="subu a0, a0, a1")
+
+%def op_sub_long():
+/*
+ * For little endian the code sequence looks as follows:
+ *    subu    v0,a0,a2
+ *    subu    v1,a1,a3
+ *    sltu    a0,a0,v0
+ *    subu    v1,v1,a0
+ */
+%  binopWide(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
+
+%def op_sub_long_2addr():
+/*
+ * See op_sub_long.S for more details
+ */
+%  binopWide2addr(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
+
+%def op_ushr_int():
+%  binop(instr="srl a0, a0, a1")
+
+%def op_ushr_int_2addr():
+%  binop2addr(instr="srl a0, a0, a1 ")
+
+%def op_ushr_int_lit8():
+%  binopLit8(instr="srl a0, a0, a1")
+
+%def op_ushr_long():
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .L${opcode}_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
+%def op_ushr_long_helper_code():
+
+.Lop_ushr_long_finish:
+    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
+
+%def op_ushr_long_2addr():
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    GET_OPA4(t3)                           #  t3 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .L${opcode}_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
+%def op_ushr_long_2addr_helper_code():
+
+.Lop_ushr_long_2addr_finish:
+    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
+
+%def op_xor_int():
+%  binop(instr="xor a0, a0, a1")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit8():
+%  binopLit8(instr="xor a0, a0, a1")
+
+%def op_xor_long():
+%  binopWide(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
diff --git a/runtime/interpreter/mterp/mips/array.S b/runtime/interpreter/mterp/mips/array.S
new file mode 100644
index 0000000..57ab147
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/array.S
@@ -0,0 +1,239 @@
+%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $load a2, $data_offset(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+%def op_aget_boolean():
+%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a1, MterpException
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
+
+%def op_aget_short():
+%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
+
+%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
+    JR(t0)                                 #  jump to next instruction
+
+%def op_aput_boolean():
+%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpAputObject)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+%def op_aput_short():
+%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
+    JR(t0)                                 #  jump to next instruction
+
+%def op_array_length():
+    /*
+     * Return the length of an array.
+     */
+    /* array-length vA, vB */
+    GET_OPB(a1)                            #  a1 <- B
+    GET_OPA4(a2)                           #  a2 <- A+
+    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
+    # is object null?
+    beqz      a0, common_errNullObject     #  yup, fail
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
+    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
+    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
+    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
+    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern $helper
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
+    move   a1, rPC
+    move   a2, rSELF
+    JAL($helper)                           #  v0 <- helper(shadow_frame, pc, self)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpNewArray)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/bincmp.S b/runtime/interpreter/mterp/mips/bincmp.S
deleted file mode 100644
index 68df5c3..0000000
--- a/runtime/interpreter/mterp/mips/bincmp.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    b${condition} a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
deleted file mode 100644
index 862d95a..0000000
--- a/runtime/interpreter/mterp/mips/binop.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
deleted file mode 100644
index 17aa8eb..0000000
--- a/runtime/interpreter/mterp/mips/binop2addr.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
deleted file mode 100644
index 0696e7a..0000000
--- a/runtime/interpreter/mterp/mips/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if $chkzero
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
deleted file mode 100644
index 382dd2b..0000000
--- a/runtime/interpreter/mterp/mips/binopLit8.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
deleted file mode 100644
index 604134d..0000000
--- a/runtime/interpreter/mterp/mips/binopWide.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
deleted file mode 100644
index f96fdb2..0000000
--- a/runtime/interpreter/mterp/mips/binopWide2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/const.S b/runtime/interpreter/mterp/mips/const.S
deleted file mode 100644
index 5d8379d..0000000
--- a/runtime/interpreter/mterp/mips/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL($helper)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/control_flow.S b/runtime/interpreter/mterp/mips/control_flow.S
new file mode 100644
index 0000000..88e1f0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/control_flow.S
@@ -0,0 +1,214 @@
+%def bincmp(condition=""):
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a0, a0)                       #  a0 <- vA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    b${condition} a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
+    li        t0, JIT_CHECK_OSR
+    beq       rPROFILE, t0, .L_check_not_taken_osr
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def zcmp(condition=""):
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform.
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a0, a0)                       #  a0 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    b${condition} a0, zero, MterpCommonTakenBranchNoFlags
+    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
+    beq       rPROFILE, t0, .L_check_not_taken_osr
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def op_goto():
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
+    b         MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+%  bincmp(condition="eq")
+
+%def op_if_eqz():
+%  zcmp(condition="eq")
+
+%def op_if_ge():
+%  bincmp(condition="ge")
+
+%def op_if_gez():
+%  zcmp(condition="ge")
+
+%def op_if_gt():
+%  bincmp(condition="gt")
+
+%def op_if_gtz():
+%  zcmp(condition="gt")
+
+%def op_if_le():
+%  bincmp(condition="le")
+
+%def op_if_lez():
+%  zcmp(condition="le")
+
+%def op_if_lt():
+%  bincmp(condition="lt")
+
+%def op_if_ltz():
+%  zcmp(condition="lt")
+
+%def op_if_ne():
+%  bincmp(condition="ne")
+
+%def op_if_nez():
+%  zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL($func)                             #  a0 <- code-unit branch offset
+    move      rINST, v0
+    b         MterpCommonTakenBranchNoFlags
+
+%def op_return():
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(v0, a2)                       #  v0 <- vAA
+    move      v1, zero
+    b         MterpReturn
+
+%def op_return_object():
+%  op_return()
+
+%def op_return_void():
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move      v0, zero
+    move      v1, zero
+    b         MterpReturn
+
+%def op_return_void_no_barrier():
+    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
+    move   a0, rSELF
+    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqz   ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move   v0, zero
+    move   v1, zero
+    b      MterpReturn
+
+%def op_return_wide():
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
+    b         MterpReturn
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC()                              #  exception handler can throw
+    GET_OPA(a2)                              #  a2 <- AA
+    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
+    # null object?
+    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
+    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
+    b         MterpException
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
deleted file mode 100644
index d342354..0000000
--- a/runtime/interpreter/mterp/mips/entry.S
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align 2
-    .global ExecuteMterpImpl
-    .ent    ExecuteMterpImpl
-    .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
-    .cfi_startproc
-    .set noreorder
-    .cpload t9
-    .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
-    STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
-    .cprestore STACK_OFFSET_GP
-
-    /* Remember the return register */
-    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sw      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs.
-    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
-    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
-    EAS1(rPC, a1, a0)                             # Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
-    EXPORT_PC()
-
-    /* Starting ibase */
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-    /* Set up for backwards branches & osr profiling */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    JAL(MterpSetUpHotnessCountdown)        # (method, shadow_frame, self)
-    move    rPROFILE, v0                   # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST()                           # load rINST from rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips/fallback.S b/runtime/interpreter/mterp/mips/fallback.S
deleted file mode 100644
index 82cbc63..0000000
--- a/runtime/interpreter/mterp/mips/fallback.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    b    MterpFallback
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
deleted file mode 100644
index 6c1468c..0000000
--- a/runtime/interpreter/mterp/mips/fbinop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
deleted file mode 100644
index 2caaf9c..0000000
--- a/runtime/interpreter/mterp/mips/fbinop2addr.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
deleted file mode 100644
index a1fe91e..0000000
--- a/runtime/interpreter/mterp/mips/fbinopWide.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
deleted file mode 100644
index 7303441..0000000
--- a/runtime/interpreter/mterp/mips/fbinopWide2addr.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/field.S b/runtime/interpreter/mterp/mips/field.S
deleted file mode 100644
index 1333ed7..0000000
--- a/runtime/interpreter/mterp/mips/field.S
+++ /dev/null
@@ -1 +0,0 @@
-TODO
diff --git a/runtime/interpreter/mterp/mips/floating_point.S b/runtime/interpreter/mterp/mips/floating_point.S
new file mode 100644
index 0000000..20df51e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/floating_point.S
@@ -0,0 +1,518 @@
+%def fbinop(instr=""):
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $instr                                 #  f0 = result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
+
+%def fbinop2addr(instr=""):
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     *      div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $instr
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
+
+%def fbinopWide(instr=""):
+    /*
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $instr
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
+
+%def fbinopWide2addr(instr=""):
+    /*
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *      div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
+
+%def funop(instr=""):
+    /*
+     * Generic 32-bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
+
+%def funopWider(instr=""):
+    /*
+     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
+     *
+     * For: int-to-double, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
+
+%def op_add_double():
+%  fbinopWide(instr="add.d fv0, fa0, fa1")
+
+%def op_add_double_2addr():
+%  fbinopWide2addr(instr="add.d fv0, fa0, fa1")
+
+%def op_add_float():
+%  fbinop(instr="add.s fv0, fa0, fa1")
+
+%def op_add_float_2addr():
+%  fbinop2addr(instr="add.s fv0, fa0, fa1")
+
+%def op_cmpg_double():
+%  op_cmpl_double(gt_bias="1")
+
+%def op_cmpg_float():
+%  op_cmpl_float(gt_bias="1")
+
+%def op_cmpl_double(gt_bias="0"):
+    /*
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register based on the comparison results.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
+    srl       t0, a0, 8                    #  t0 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
+    LOAD64_F(ft0, ft0f, rOBJ)
+    LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+    cmp.eq.d  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.d  ft2, ft0, ft1
+    li        rTEMP, -1
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.d  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
+    c.eq.d    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if $gt_bias
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+%def op_cmpl_float(gt_bias="0"):
+    /*
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register based on the comparison results.
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+    GET_VREG_F(ft0, a2)
+    GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+    cmp.eq.s  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.s  ft2, ft0, ft1
+    li        rTEMP, -1
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.s  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
+    c.eq.s    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if $gt_bias
+    c.olt.s   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+%def op_div_double():
+%  fbinopWide(instr="div.d fv0, fa0, fa1")
+
+%def op_div_double_2addr():
+%  fbinopWide2addr(instr="div.d fv0, fa0, fa1")
+
+%def op_div_float():
+%  fbinop(instr="div.s fv0, fa0, fa1")
+
+%def op_div_float_2addr():
+%  fbinop2addr(instr="div.s fv0, fa0, fa1")
+
+%def op_double_to_float():
+%  unopNarrower(instr="cvt.s.d fv0, fa0")
+
+%def op_double_to_int():
+    /*
+     * double-to-int
+     *
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us
+     * for pre-R6.
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifndef MIPS32REVGE6
+    li        t0, INT_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+    c.ole.d   fcc0, fa1, fa0
+#endif
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+#ifndef MIPS32REVGE6
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.d    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+1:
+#endif
+    trunc.w.d fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
+
+%def op_double_to_long():
+    /*
+     * double-to-long
+     *
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us
+     * for pre-R6.
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    trunc.l.d fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
+#else
+    c.eq.d    fcc0, fa0, fa0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1f      fcc0, .L${opcode}_get_opcode
+
+    li        t0, LONG_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+    c.ole.d   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    neg.d     fa1, fa1
+    c.ole.d   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    JAL(__fixdfdi)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    b         .L${opcode}_set_vreg
+#endif
+%def op_double_to_long_helper_code():
+
+#ifndef MIPS32REVGE6
+.Lop_double_to_long_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.Lop_double_to_long_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
+#endif
+
+%def op_float_to_double():
+%  funopWider(instr="cvt.d.s fv0, fa0")
+
+%def op_float_to_int():
+    /*
+     * float-to-int
+     *
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us
+     * for pre-R6.
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+#ifndef MIPS32REVGE6
+    li        t0, INT_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    c.ole.s   fcc0, fa1, fa0
+#endif
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+#ifndef MIPS32REVGE6
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.s    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+1:
+#endif
+    trunc.w.s fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
+
+%def op_float_to_long():
+    /*
+     * float-to-long
+     *
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us
+     * for pre-R6.
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    trunc.l.s fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
+#else
+    c.eq.s    fcc0, fa0, fa0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1f      fcc0, .L${opcode}_get_opcode
+
+    li        t0, LONG_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    neg.s     fa1, fa1
+    c.ole.s   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    JAL(__fixsfdi)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    b         .L${opcode}_set_vreg
+#endif
+%def op_float_to_long_helper_code():
+
+#ifndef MIPS32REVGE6
+.Lop_float_to_long_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.Lop_float_to_long_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
+#endif
+
+%def op_int_to_double():
+%  funopWider(instr="cvt.d.w fv0, fa0")
+
+%def op_int_to_float():
+%  funop(instr="cvt.s.w fv0, fa0")
+
+%def op_long_to_double():
+    /*
+     * long-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.d.l   fv0, fv0
+#else
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
+#endif
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
+
+%def op_long_to_float():
+    /*
+     * long-to-float
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.s.l   fv0, fv0
+#else
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(__floatdisf)
+#endif
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
+
+%def op_mul_double():
+%  fbinopWide(instr="mul.d fv0, fa0, fa1")
+
+%def op_mul_double_2addr():
+%  fbinopWide2addr(instr="mul.d fv0, fa0, fa1")
+
+%def op_mul_float():
+%  fbinop(instr="mul.s fv0, fa0, fa1")
+
+%def op_mul_float_2addr():
+%  fbinop2addr(instr="mul.s fv0, fa0, fa1")
+
+%def op_neg_double():
+%  unopWide(instr="addu a1, a1, 0x80000000")
+
+%def op_neg_float():
+%  unop(instr="addu a0, a0, 0x80000000")
+
+%def op_rem_double():
+%  fbinopWide(instr="JAL(fmod)")
+
+%def op_rem_double_2addr():
+%  fbinopWide2addr(instr="JAL(fmod)")
+
+%def op_rem_float():
+%  fbinop(instr="JAL(fmodf)")
+
+%def op_rem_float_2addr():
+%  fbinop2addr(instr="JAL(fmodf)")
+
+%def op_sub_double():
+%  fbinopWide(instr="sub.d fv0, fa0, fa1")
+
+%def op_sub_double_2addr():
+%  fbinopWide2addr(instr="sub.d fv0, fa0, fa1")
+
+%def op_sub_float():
+%  fbinop(instr="sub.s fv0, fa0, fa1")
+
+%def op_sub_float_2addr():
+%  fbinop2addr(instr="sub.s fv0, fa0, fa1")
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
deleted file mode 100644
index 1c784ef..0000000
--- a/runtime/interpreter/mterp/mips/footer.S
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogDivideByZeroException)
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogArrayIndexException)
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNegativeArraySizeException)
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNoSuchMethodException)
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNullObjectException)
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogExceptionThrownException)
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
-    JAL(MterpLogSuspendFallback)
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqz    a0, MterpFallback          # If exception, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpHandleException)                    # (self, shadow_frame)
-    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
-    lw      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lw      a1, OFF_FP_DEX_PC(rFP)
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-    EAS1(rPC, a0, a1)                            # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC()
-    FETCH_INST()
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
-    bgtz    rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    li      t0, JIT_CHECK_OSR
-    beq     rPROFILE, t0, .L_osr_check
-    blt     rPROFILE, t0, .L_resume_backward_branch
-    subu    rPROFILE, 1
-    beqz    rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE()
-    addu    a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnez    ra, .L_suspend_request_pending
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC()
-    move    a0, rSELF
-    JAL(MterpSuspendCheck)              # (self)
-    bnez    v0, MterpFallback
-    REFRESH_IBASE()                     # might have changed during suspend
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_no_count_backwards:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bne     rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beq     rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
-    add     a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    lw      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST(2)
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    JAL(MterpLogOSR)
-#endif
-    li      v0, 1                       # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    move    v0, zero                    # signal retry with reference interpreter.
-    b       MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                       # signal return to caller.
-    b       MterpDone
-MterpReturn:
-    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sw      v0, 0(a2)
-    sw      v1, 4(a2)
-    li      v0, 1                       # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
-    STACK_LOAD_FULL()
-    jalr    zero, ra
-
-    .cfi_endproc
-    .end ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
deleted file mode 100644
index b2b22c9..0000000
--- a/runtime/interpreter/mterp/mips/funop.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /*
-     * Generic 32-bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
deleted file mode 100644
index 6862e24..0000000
--- a/runtime/interpreter/mterp/mips/funopWider.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
deleted file mode 100644
index bef9eeb..0000000
--- a/runtime/interpreter/mterp/mips/header.S
+++ /dev/null
@@ -1,727 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2    /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64           /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6    /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
-   reg  nick      purpose
-   s0   rPC       interpreted program counter, used for fetching instructions
-   s1   rFP       interpreted frame pointer, used for accessing locals and args
-   s2   rSELF     self (Thread) pointer
-   s3   rIBASE    interpreted instruction base pointer, used for computed goto
-   s4   rINST     first 16-bit code unit of current instruction
-   s5   rOBJ      object pointer
-   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
-   s7   rTEMP     used as temp storage that can survive a function call
-   s8   rPROFILE  branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4   // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero    $$0      /* always zero */
-#define AT      $$at     /* assembler temp */
-#define v0      $$2      /* return value */
-#define v1      $$3
-#define a0      $$4      /* argument registers */
-#define a1      $$5
-#define a2      $$6
-#define a3      $$7
-#define t0      $$8      /* temp registers (not saved across subroutine calls) */
-#define t1      $$9
-#define t2      $$10
-#define t3      $$11
-#define t4      $$12
-#define t5      $$13
-#define t6      $$14
-#define t7      $$15
-#define ta0     $$12     /* alias */
-#define ta1     $$13
-#define ta2     $$14
-#define ta3     $$15
-#define s0      $$16     /* saved across subroutine calls (callee saved) */
-#define s1      $$17
-#define s2      $$18
-#define s3      $$19
-#define s4      $$20
-#define s5      $$21
-#define s6      $$22
-#define s7      $$23
-#define t8      $$24     /* two more temp registers */
-#define t9      $$25
-#define k0      $$26     /* kernel temporary */
-#define k1      $$27
-#define gp      $$28     /* global pointer */
-#define sp      $$29     /* stack pointer */
-#define s8      $$30     /* one more callee saved */
-#define ra      $$31     /* return address */
-
-/* FP register definitions */
-#define fv0    $$f0
-#define fv0f   $$f1
-#define fv1    $$f2
-#define fv1f   $$f3
-#define fa0    $$f12
-#define fa0f   $$f13
-#define fa1    $$f14
-#define fa1f   $$f15
-#define ft0    $$f4
-#define ft0f   $$f5
-#define ft1    $$f6
-#define ft1f   $$f7
-#define ft2    $$f8
-#define ft2f   $$f9
-#define ft3    $$f10
-#define ft3f   $$f11
-#define ft4    $$f16
-#define ft4f   $$f17
-#define ft5    $$f18
-#define ft5f   $$f19
-#define fs0    $$f20
-#define fs0f   $$f21
-#define fs1    $$f22
-#define fs1f   $$f23
-#define fs2    $$f24
-#define fs2f   $$f25
-#define fs3    $$f26
-#define fs3f   $$f27
-#define fs4    $$f28
-#define fs4f   $$f29
-#define fs5    $$f30
-#define fs5f   $$f31
-
-#ifndef MIPS32REVGE6
-#define fcc0   $$fcc0
-#define fcc1   $$fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
-    seb       rd, rt
-#define SEH(rd, rt) \
-    seh       rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    ins       rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
-    sll       rd, rt, 24; \
-    sra       rd, rd, 24
-#define SEH(rd, rt) \
-    sll       rd, rt, 16; \
-    sra       rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    sll       rt_hi, rt_hi, 16; \
-    or        rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mthc1     r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mtc1      r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
-    jic       rt, 0
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    lsa       rd, rs, rt, sa; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#else
-#define JR(rt) \
-    jalr      zero, rt
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    .set      push; \
-    .set      noat; \
-    sll       AT, rs, sa; \
-    addu      rd, AT, rt; \
-    .set      pop; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
-    lw        tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
-    subu      tmp, rPC, tmp; \
-    sra       tmp, tmp, 1; \
-    sw        tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
-    lhu       rINST, ((_count)*2)(rPC); \
-    addu      rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd.  Updates
- * rPC to point to the next instruction.  "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
-    addu      rPC, rPC, rd; \
-    lhu       rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
-    sll       rd, rd, ${handler_size_bits}; \
-    addu      rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
-    GET_OPCODE_TARGET(rd); \
-    JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
-    .set noat; \
-    EAS2(AT, rFP, rix); \
-    l.s       rd, (AT); \
-    .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    s.s       rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    jalr      zero, dst; \
-    s.s       rlo, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    lw        rd, 0(AT); \
-    .set at
-
-#define STORE_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    sw        rd, 0(AT); \
-    .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
-    sw        rlo, off(rbase); \
-    sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
-    lw        rlo, off(rbase); \
-    lw        rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    sw        AT, (off+4)(rbase); \
-    .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    .set noat; \
-    lw        AT, (off+4)(rbase); \
-    mthc1     AT, rlo; \
-    .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    l.s       rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP    84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
-    STACK_STORE(ra, 124); \
-    STACK_STORE(s8, 120); \
-    STACK_STORE(s0, 116); \
-    STACK_STORE(s1, 112); \
-    STACK_STORE(s2, 108); \
-    STACK_STORE(s3, 104); \
-    STACK_STORE(s4, 100); \
-    STACK_STORE(s5, 96); \
-    STACK_STORE(s6, 92); \
-    STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
-    STACK_LOAD(s7, 88); \
-    STACK_LOAD(s6, 92); \
-    STACK_LOAD(s5, 96); \
-    STACK_LOAD(s4, 100); \
-    STACK_LOAD(s3, 104); \
-    STACK_LOAD(s2, 108); \
-    STACK_LOAD(s1, 112); \
-    STACK_LOAD(s0, 116); \
-    STACK_LOAD(s8, 120); \
-    STACK_LOAD(ra, 124); \
-    DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
-    lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN                 0x80000000
-#define INT_MIN_AS_FLOAT        0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
-#define LONG_MIN_HIGH           0x80000000
-#define LONG_MIN_AS_FLOAT       0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
diff --git a/runtime/interpreter/mterp/mips/instruction_end.S b/runtime/interpreter/mterp/mips/instruction_end.S
deleted file mode 100644
index 32c725c..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_end_alt.S b/runtime/interpreter/mterp/mips/instruction_end_alt.S
deleted file mode 100644
index f90916f..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end_alt.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_end_sister.S b/runtime/interpreter/mterp/mips/instruction_end_sister.S
deleted file mode 100644
index c5f4886..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end_sister.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_start.S b/runtime/interpreter/mterp/mips/instruction_start.S
deleted file mode 100644
index 8874c20..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/mips/instruction_start_alt.S b/runtime/interpreter/mterp/mips/instruction_start_alt.S
deleted file mode 100644
index 0c9ffdb..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start_alt.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/mips/instruction_start_sister.S b/runtime/interpreter/mterp/mips/instruction_start_sister.S
deleted file mode 100644
index 2ec51f7..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
index db3b8af..c77d12b 100644
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
     /*
      * Generic invoke handler wrapper.
      */
@@ -17,3 +17,71 @@
     bnez    v0, MterpFallback
     GET_INST_OPCODE(t0)
     GOTO_OPCODE(t0)
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL($helper)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(4)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips/invoke_polymorphic.S b/runtime/interpreter/mterp/mips/invoke_polymorphic.S
deleted file mode 100644
index 5c963f0..0000000
--- a/runtime/interpreter/mterp/mips/invoke_polymorphic.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL($helper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(4)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
diff --git a/runtime/interpreter/mterp/mips/main.S b/runtime/interpreter/mterp/mips/main.S
new file mode 100644
index 0000000..3ebd3d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/main.S
@@ -0,0 +1,1151 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame.
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation.
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2    /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64           /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6    /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+   reg  nick      purpose
+   s0   rPC       interpreted program counter, used for fetching instructions
+   s1   rFP       interpreted frame pointer, used for accessing locals and args
+   s2   rSELF     self (Thread) pointer
+   s3   rIBASE    interpreted instruction base pointer, used for computed goto
+   s4   rINST     first 16-bit code unit of current instruction
+   s5   rOBJ      object pointer
+   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
+   s7   rTEMP     used as temp storage that can survive a function call
+   s8   rPROFILE  branch profiling countdown
+
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define CFI_DEX 16  // DWARF register number of the register holding dex-pc (s0).
+#define CFI_TMP 4   // DWARF register number of the first argument register (a0).
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+#define rPROFILE s8
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero    $$0      /* always zero */
+#define AT      $$at     /* assembler temp */
+#define v0      $$2      /* return value */
+#define v1      $$3
+#define a0      $$4      /* argument registers */
+#define a1      $$5
+#define a2      $$6
+#define a3      $$7
+#define t0      $$8      /* temp registers (not saved across subroutine calls) */
+#define t1      $$9
+#define t2      $$10
+#define t3      $$11
+#define t4      $$12
+#define t5      $$13
+#define t6      $$14
+#define t7      $$15
+#define ta0     $$12     /* alias */
+#define ta1     $$13
+#define ta2     $$14
+#define ta3     $$15
+#define s0      $$16     /* saved across subroutine calls (callee saved) */
+#define s1      $$17
+#define s2      $$18
+#define s3      $$19
+#define s4      $$20
+#define s5      $$21
+#define s6      $$22
+#define s7      $$23
+#define t8      $$24     /* two more temp registers */
+#define t9      $$25
+#define k0      $$26     /* kernel temporary */
+#define k1      $$27
+#define gp      $$28     /* global pointer */
+#define sp      $$29     /* stack pointer */
+#define s8      $$30     /* one more callee saved */
+#define ra      $$31     /* return address */
+
+/* FP register definitions */
+#define fv0    $$f0
+#define fv0f   $$f1
+#define fv1    $$f2
+#define fv1f   $$f3
+#define fa0    $$f12
+#define fa0f   $$f13
+#define fa1    $$f14
+#define fa1f   $$f15
+#define ft0    $$f4
+#define ft0f   $$f5
+#define ft1    $$f6
+#define ft1f   $$f7
+#define ft2    $$f8
+#define ft2f   $$f9
+#define ft3    $$f10
+#define ft3f   $$f11
+#define ft4    $$f16
+#define ft4f   $$f17
+#define ft5    $$f18
+#define ft5f   $$f19
+#define fs0    $$f20
+#define fs0f   $$f21
+#define fs1    $$f22
+#define fs1f   $$f23
+#define fs2    $$f24
+#define fs2f   $$f25
+#define fs3    $$f26
+#define fs3f   $$f27
+#define fs4    $$f28
+#define fs4f   $$f29
+#define fs5    $$f30
+#define fs5f   $$f31
+
+#ifndef MIPS32REVGE6
+#define fcc0   $$fcc0
+#define fcc1   $$fcc1
+#endif
+
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+    seb       rd, rt
+#define SEH(rd, rt) \
+    seh       rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    ins       rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+    sll       rd, rt, 24; \
+    sra       rd, rd, 24
+#define SEH(rd, rt) \
+    sll       rd, rt, 16; \
+    sra       rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    sll       rt_hi, rt_hi, 16; \
+    or        rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mthc1     r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mtc1      r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+    jic       rt, 0
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    lsa       rd, rs, rt, sa; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#else
+#define JR(rt) \
+    jalr      zero, rt
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    .set      push; \
+    .set      noat; \
+    sll       AT, rs, sa; \
+    addu      rd, AT, rt; \
+    .set      pop; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to the dex_pc field in the shadow frame, for the benefit of future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+    lw        tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
+    subu      tmp, rPC, tmp; \
+    sra       tmp, tmp, 1; \
+    sw        tmp, OFF_FP_DEX_PC(rFP)
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) \
+    lhu       rINST, ((_count)*2)(rPC); \
+    addu      rPC, rPC, ((_count) * 2)
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd.  Updates
+ * rPC to point to the next instruction.  "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) \
+    addu      rPC, rPC, rd; \
+    lhu       rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Transform opcode into branch target address.
+ */
+#define GET_OPCODE_TARGET(rd) \
+    sll       rd, rd, ${handler_size_bits}; \
+    addu      rd, rIBASE, rd
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) \
+    GET_OPCODE_TARGET(rd); \
+    JR(rd)
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) \
+    .set noat; \
+    EAS2(AT, rFP, rix); \
+    l.s       rd, (AT); \
+    .set at
+
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        rd, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    s.s       rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#endif
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    jalr      zero, dst; \
+    s.s       rlo, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#endif
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
+ */
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+#define LOAD_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    lw        rd, 0(AT); \
+    .set at
+
+#define STORE_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    sw        rd, 0(AT); \
+    .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) \
+    sw        rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) \
+    lw        rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    sw        AT, (off+4)(rbase); \
+    .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
+    .set noat; \
+    lw        AT, (off+4)(rbase); \
+    mthc1     AT, rlo; \
+    .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
+    s.s       rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
+    l.s       rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP    84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(ra, 124); \
+    STACK_STORE(s8, 120); \
+    STACK_STORE(s0, 116); \
+    STACK_STORE(s1, 112); \
+    STACK_STORE(s2, 108); \
+    STACK_STORE(s3, 104); \
+    STACK_STORE(s4, 100); \
+    STACK_STORE(s5, 96); \
+    STACK_STORE(s6, 92); \
+    STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+    STACK_LOAD(s7, 88); \
+    STACK_LOAD(s6, 92); \
+    STACK_LOAD(s5, 96); \
+    STACK_LOAD(s4, 100); \
+    STACK_LOAD(s3, 104); \
+    STACK_LOAD(s2, 108); \
+    STACK_LOAD(s1, 112); \
+    STACK_LOAD(s0, 116); \
+    STACK_LOAD(s8, 120); \
+    STACK_LOAD(ra, 124); \
+    DELETE_STACK(STACK_SIZE)
+
+#define REFRESH_IBASE() \
+    lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN                 0x80000000
+#define INT_MIN_AS_FLOAT        0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
+#define LONG_MIN_HIGH           0x80000000
+#define LONG_MIN_AS_FLOAT       0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .align 2
+    .global ExecuteMterpImpl
+    .ent    ExecuteMterpImpl
+    .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  dex_instructions
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+    .cfi_startproc
+    .set noreorder
+    .cpload t9
+    .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+    STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+    .cprestore STACK_OFFSET_GP
+
+    /* Remember the return register */
+    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the dex instruction pointer */
+    sw      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs.
+    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
+    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
+    EAS1(rPC, a1, a0)                             # Create direct pointer to 1st dex opcode
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+
+    EXPORT_PC()
+
+    /* Starting ibase */
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+    /* Set up for backwards branches & osr profiling */
+    lw      a0, OFF_FP_METHOD(rFP)
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rSELF
+    JAL(MterpSetUpHotnessCountdown)        # (method, shadow_frame, self)
+    move    rPROFILE, v0                   # Starting hotness countdown to rPROFILE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST()                           # load rINST from rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+    /* NOTE: no fallthrough */
+
+%def alt_stub():
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    la     ra, artMterpAsmInstructionStart + (${opnum} * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    move   a2, rPC
+    la     t9, MterpCheckBefore
+    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+%def helpers():
+%  op_float_to_long_helper_code()
+%  op_double_to_long_helper_code()
+%  op_mul_long_helper_code()
+%  op_shl_long_helper_code()
+%  op_shr_long_helper_code()
+%  op_ushr_long_helper_code()
+%  op_shl_long_2addr_helper_code()
+%  op_shr_long_2addr_helper_code()
+%  op_ushr_long_2addr_helper_code()
+
+%def footer():
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogDivideByZeroException)
+#endif
+    b MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogArrayIndexException)
+#endif
+    b MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNegativeArraySizeException)
+#endif
+    b MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNoSuchMethodException)
+#endif
+    b MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNullObjectException)
+#endif
+    b MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogExceptionThrownException)
+#endif
+    b MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
+    JAL(MterpLogSuspendFallback)
+#endif
+    b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqz    a0, MterpFallback          # No pending exception? Then fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpHandleException)                    # (self, shadow_frame)
+    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
+    lw      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
+    lw      a1, OFF_FP_DEX_PC(rFP)
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+    EAS1(rPC, a0, a1)                            # generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC()
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+    /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    rINST          <= signed offset
+ *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ */
+MterpCommonTakenBranchNoFlags:
+    bgtz    rINST, .L_forward_branch    # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+#  error "JIT_CHECK_OSR must be -1."
+#endif
+    li      t0, JIT_CHECK_OSR
+    beq     rPROFILE, t0, .L_osr_check              # rPROFILE == -1: OSR check mode
+    blt     rPROFILE, t0, .L_resume_backward_branch # other negative: hotness disabled
+    subu    rPROFILE, 1                 # decrement hotness countdown
+    beqz    rPROFILE, .L_add_batch      # counted down to zero - report
+.L_resume_backward_branch:
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # ra <- thread flags
+    REFRESH_IBASE()
+    addu    a2, rINST, rINST            # a2<- byte offset
+    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST  # isolate suspend/checkpoint bits
+    bnez    ra, .L_suspend_request_pending            # request pending? go handle it
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+.L_suspend_request_pending:
+    EXPORT_PC()
+    move    a0, rSELF
+    JAL(MterpSuspendCheck)              # (self)
+    bnez    v0, MterpFallback
+    REFRESH_IBASE()                     # might have changed during suspend
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+.L_no_count_backwards:
+    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
+    bne     rPROFILE, t0, .L_resume_backward_branch
+.L_osr_check:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    EXPORT_PC()
+    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    bnez    v0, MterpOnStackReplacement
+    b       .L_resume_backward_branch
+
+.L_forward_branch:
+    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
+    beq     rPROFILE, t0, .L_check_osr_forward
+.L_resume_forward_branch:
+    add     a2, rINST, rINST            # a2<- byte offset
+    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+.L_check_osr_forward:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    EXPORT_PC()
+    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    bnez    v0, MterpOnStackReplacement
+    b       .L_resume_forward_branch
+
+.L_add_batch:
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+    lw      a0, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
+    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
+    b       .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    li      a2, 2
+    EXPORT_PC()
+    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    bnez    v0, MterpOnStackReplacement
+    FETCH_ADVANCE_INST(2)
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    JAL(MterpLogOSR)
+#endif
+    li      v0, 1                       # Signal normal return
+    b       MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    move    v0, zero                    # signal retry with reference interpreter.
+    b       MterpDone
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR.  Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                       # signal return to caller.
+    b       MterpDone
+MterpReturn:
+    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
+    sw      v0, 0(a2)
+    sw      v1, 4(a2)
+    li      v0, 1                       # signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    blez    rPROFILE, .L_pop_and_return # <= 0: nothing to report; fall through when > 0.
+
+MterpProfileActive:
+    move    rINST, v0                   # stash return value
+    /* Report cached hotness counts */
+    lw      a0, OFF_FP_METHOD(rFP)
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rSELF
+    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
+    move    v0, rINST                   # restore return value
+
+.L_pop_and_return:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+    STACK_LOAD_FULL()
+    jalr    zero, ra                    # return to caller
+
+    .cfi_endproc
+    .end ExecuteMterpImpl
+
+%def instruction_end():
+
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_end_alt():
+
+    .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+
+%def instruction_start():
+
+    .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+%def instruction_start_alt():
+
+    .global artMterpAsmAltInstructionStart
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+    .text
+
+%def opcode_start():
+%  pass
+%def opcode_end():
+%  pass
diff --git a/runtime/interpreter/mterp/mips/object.S b/runtime/interpreter/mterp/mips/object.S
new file mode 100644
index 0000000..a987789
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/object.S
@@ -0,0 +1,257 @@
+%def field(helper=""):
+TODO
+
+%def op_check_cast():
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           #  a0 <- BBBB
+    GET_OPA(a1)                            #  a1 <- AA
+    EAS2(a1, rFP, a1)                      #  a1 <- &object
+    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
+    move   a3, rSELF                       #  a3 <- self
+    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
+    PREFETCH_INST(2)
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+%  field(helper=helper)
+
+%def op_iget_boolean():
+%  op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="lbu")
+
+%def op_iget_byte():
+%  op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="lb")
+
+%def op_iget_char():
+%  op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="lhu")
+
+%def op_iget_object():
+%  op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
+    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           #  a2<- A+
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a3, MterpPossibleException        #  bail out
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
+
+%def op_iget_quick(load="lw"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    $load     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
+%def op_iget_short():
+%  op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="lh")
+
+%def op_iget_wide():
+%  op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+    /* iget-wide-quick vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- a3 + a1
+    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
+
+%def op_instance_of():
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- CCCC
+    GET_OPB(a1)                            # a1 <- B
+    EAS2(a1, rFP, a1)                      # a1 <- &object
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    GET_OPA4(rOBJ)                         # rOBJ <- A+
+    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       # load rINST
+    bnez a1, MterpException
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(store="sb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(store="sb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(store="sh")
+
+%def op_iput_object():
+%  op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    /* For: iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpIputObjectQuick)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+%def op_iput_quick(store="sw"):
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
+    $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
+    JR(t1)                                 #  jump to next instruction
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(store="sh")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    GET_OPA4(a0)                           #  a0 <- A(+)
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
+    # check object for null
+    beqz      a2, common_errNullObject     #  object was null
+    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
+    FETCH(a3, 1)                           #  a3 <- field byte offset
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      a2, a2, a3                   #  obj.field (64 bits, aligned) <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    JR(t0)                                 #  jump to next instruction
+
+%def op_new_instance():
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rSELF
+    move   a2, rINST
+    JAL(MterpNewInstance)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips/op_add_double.S b/runtime/interpreter/mterp/mips/op_add_double.S
deleted file mode 100644
index 12ef0cf3..0000000
--- a/runtime/interpreter/mterp/mips/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_double_2addr.S b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
deleted file mode 100644
index c57add5..0000000
--- a/runtime/interpreter/mterp/mips/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float.S b/runtime/interpreter/mterp/mips/op_add_float.S
deleted file mode 100644
index 6a46cf0..0000000
--- a/runtime/interpreter/mterp/mips/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float_2addr.S b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
deleted file mode 100644
index 6ab5cc1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int.S b/runtime/interpreter/mterp/mips/op_add_int.S
deleted file mode 100644
index 53a0cb1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_2addr.S b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
deleted file mode 100644
index ddd9214..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit16.S b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
deleted file mode 100644
index 05535c1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit8.S b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
deleted file mode 100644
index fd021b3..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_long.S b/runtime/interpreter/mterp/mips/op_add_long.S
deleted file mode 100644
index faacc6a..0000000
--- a/runtime/interpreter/mterp/mips/op_add_long.S
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- *  The compiler generates the following sequence for
- *  [v1 v0] =  [a1 a0] + [a3 a2];
- *    addu v0,a2,a0
- *    addu a1,a3,a1
- *    sltu v1,v0,a2
- *    addu v1,v1,a1
- */
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_add_long_2addr.S b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
deleted file mode 100644
index bf827c1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_long_2addr.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * See op_add_long.S for details
- */
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
deleted file mode 100644
index e88402c..0000000
--- a/runtime/interpreter/mterp/mips/op_aget.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $load a2, $data_offset(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
diff --git a/runtime/interpreter/mterp/mips/op_aget_boolean.S b/runtime/interpreter/mterp/mips/op_aget_boolean.S
deleted file mode 100644
index 59f7f82..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_byte.S b/runtime/interpreter/mterp/mips/op_aget_byte.S
deleted file mode 100644
index 11038fa..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_char.S b/runtime/interpreter/mterp/mips/op_aget_char.S
deleted file mode 100644
index 96f2ab6..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
deleted file mode 100644
index 9c49dfe..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_object.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_aget_short.S b/runtime/interpreter/mterp/mips/op_aget_short.S
deleted file mode 100644
index cd7f7bf..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_wide.S b/runtime/interpreter/mterp/mips/op_aget_wide.S
deleted file mode 100644
index 08822f5..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_wide.S
+++ /dev/null
@@ -1,22 +0,0 @@
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
diff --git a/runtime/interpreter/mterp/mips/op_and_int.S b/runtime/interpreter/mterp/mips/op_and_int.S
deleted file mode 100644
index 98fe4af..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_2addr.S b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
deleted file mode 100644
index 7f90ed4..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit16.S b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
deleted file mode 100644
index e46f23b..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit8.S b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
deleted file mode 100644
index 3332883..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long.S b/runtime/interpreter/mterp/mips/op_and_long.S
deleted file mode 100644
index a98a6df..0000000
--- a/runtime/interpreter/mterp/mips/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long_2addr.S b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
deleted file mode 100644
index 350c044..0000000
--- a/runtime/interpreter/mterp/mips/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
deleted file mode 100644
index 46dcaee..0000000
--- a/runtime/interpreter/mterp/mips/op_aput.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_boolean.S b/runtime/interpreter/mterp/mips/op_aput_boolean.S
deleted file mode 100644
index 9cae5ef..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_byte.S b/runtime/interpreter/mterp/mips/op_aput_byte.S
deleted file mode 100644
index 3bbd12c..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_char.S b/runtime/interpreter/mterp/mips/op_aput_char.S
deleted file mode 100644
index ae69717..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_object.S b/runtime/interpreter/mterp/mips/op_aput_object.S
deleted file mode 100644
index 55b13b1..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpAputObject)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_short.S b/runtime/interpreter/mterp/mips/op_aput_short.S
deleted file mode 100644
index 9586259..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
deleted file mode 100644
index c3cff56..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_wide.S
+++ /dev/null
@@ -1,24 +0,0 @@
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t0)                            #  t0 <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
-    # compare unsigned index, length
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
deleted file mode 100644
index ae2fe68..0000000
--- a/runtime/interpreter/mterp/mips/op_array_length.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Return the length of an array.
-     */
-    /* array-length vA, vB */
-    GET_OPB(a1)                            #  a1 <- B
-    GET_OPA4(a2)                           #  a2 <- A+
-    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
-    # is object null?
-    beqz      a0, common_errNullObject     #  yup, fail
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
deleted file mode 100644
index 3875ce6..0000000
--- a/runtime/interpreter/mterp/mips/op_check_cast.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- BBBB
-    GET_OPA(a1)                            #  a1 <- AA
-    EAS2(a1, rFP, a1)                      #  a1 <- &object
-    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
-    move   a3, rSELF                       #  a3 <- self
-    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
-    PREFETCH_INST(2)
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_cmp_long.S b/runtime/interpreter/mterp/mips/op_cmp_long.S
deleted file mode 100644
index 44806c3..0000000
--- a/runtime/interpreter/mterp/mips/op_cmp_long.S
+++ /dev/null
@@ -1,34 +0,0 @@
-    /*
-     * Compare two 64-bit values
-     *    x = y     return  0
-     *    x < y     return -1
-     *    x > y     return  1
-     *
-     * I think I can improve on the ARM code by the following observation
-     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
-     *    sgt   t1,  x.hi, y.hi;        # (y.hi > x.hi) ? 1:0
-     *    subu  v0, t0, t1              # v0= -1:1:0 for [ < > = ]
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    slt       t0, a1, a3                   #  compare hi
-    sgt       t1, a1, a3
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
-    bnez      v0, .L${opcode}_finish
-    # at this point x.hi==y.hi
-    sltu      t0, a0, a2                   #  compare lo
-    sgtu      t1, a0, a2
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
-
-.L${opcode}_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
deleted file mode 100644
index b2e7532..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_cmpl_double.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
deleted file mode 100644
index 76550b5..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_cmpl_float.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
deleted file mode 100644
index 369e5b3..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpl_double.S
+++ /dev/null
@@ -1,52 +0,0 @@
-%default { "gt_bias":"0" }
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  rOBJ <- BB
-    srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
-    LOAD64_F(ft0, ft0f, rOBJ)
-    LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
-    cmp.eq.d  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.d    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.d   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.d   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
deleted file mode 100644
index 1dd5506..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpl_float.S
+++ /dev/null
@@ -1,50 +0,0 @@
-%default { "gt_bias":"0" }
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * for: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8
-    GET_VREG_F(ft0, a2)
-    GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
-    cmp.eq.s  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.s    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.s   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.s   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
deleted file mode 100644
index bd9f873..0000000
--- a/runtime/interpreter/mterp/mips/op_const.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const vAA, +BBBBbbbb */
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
deleted file mode 100644
index 2ffb30f..0000000
--- a/runtime/interpreter/mterp/mips/op_const_16.S
+++ /dev/null
@@ -1,6 +0,0 @@
-    /* const/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
deleted file mode 100644
index 6866c78..0000000
--- a/runtime/interpreter/mterp/mips/op_const_4.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const/4 vA, +B */
-    sll       a1, rINST, 16                #  a1 <- Bxxx0000
-    GET_OPA(a0)                            #  a0 <- A+
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
-    and       a0, a0, 15
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
deleted file mode 100644
index 5b3c968..0000000
--- a/runtime/interpreter/mterp/mips/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
deleted file mode 100644
index 5162402..0000000
--- a/runtime/interpreter/mterp/mips/op_const_high16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/high16 vAA, +BBBB0000 */
-    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sll       a0, a0, 16                   #  a0 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_method_handle.S b/runtime/interpreter/mterp/mips/op_const_method_handle.S
deleted file mode 100644
index 4011e43..0000000
--- a/runtime/interpreter/mterp/mips/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips/op_const_method_type.S b/runtime/interpreter/mterp/mips/op_const_method_type.S
deleted file mode 100644
index 18a5e0f..0000000
--- a/runtime/interpreter/mterp/mips/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
deleted file mode 100644
index 0bab6b4..0000000
--- a/runtime/interpreter/mterp/mips/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
deleted file mode 100644
index 54cec97..0000000
--- a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* const/string vAA, string@BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- bbbb (low)
-    FETCH(a2, 2)                        # a2 <- BBBB (high)
-    GET_OPA(a1)                         # a1 <- AA
-    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(3)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(3)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
deleted file mode 100644
index f8911e3..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
-    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
-    FETCH(a3, 4)                           #  a3 <- HHHH (high)
-    GET_OPA(t1)                            #  t1 <- AA
-    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
-    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
deleted file mode 100644
index 2ca5ab9..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const-wide/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
deleted file mode 100644
index bf802ca..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const-wide/32 vAA, +BBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
deleted file mode 100644
index 04b90fa..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const-wide/high16 vAA, +BBBB000000000000 */
-    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    li        a0, 0                        #  a0 <- 00000000
-    sll       a1, 16                       #  a1 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_div_double.S b/runtime/interpreter/mterp/mips/op_div_double.S
deleted file mode 100644
index 84e4c4e..0000000
--- a/runtime/interpreter/mterp/mips/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_double_2addr.S b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
deleted file mode 100644
index 65b92e3..0000000
--- a/runtime/interpreter/mterp/mips/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float.S b/runtime/interpreter/mterp/mips/op_div_float.S
deleted file mode 100644
index 44b8d47..0000000
--- a/runtime/interpreter/mterp/mips/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float_2addr.S b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
deleted file mode 100644
index e5fff92..0000000
--- a/runtime/interpreter/mterp/mips/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_int.S b/runtime/interpreter/mterp/mips/op_div_int.S
deleted file mode 100644
index 5d28c84..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_2addr.S b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
deleted file mode 100644
index 6c079e0..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_2addr.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit16.S b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
deleted file mode 100644
index ee7452c..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_lit16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit8.S b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
deleted file mode 100644
index d2964b8..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_lit8.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_long.S b/runtime/interpreter/mterp/mips/op_div_long.S
deleted file mode 100644
index 2097866..0000000
--- a/runtime/interpreter/mterp/mips/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_long_2addr.S b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
deleted file mode 100644
index c279305..0000000
--- a/runtime/interpreter/mterp/mips/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_float.S b/runtime/interpreter/mterp/mips/op_double_to_float.S
deleted file mode 100644
index 1d32c2e..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopNarrower.S" {"instr":"cvt.s.d fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
deleted file mode 100644
index 6d7c6ca..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ /dev/null
@@ -1,31 +0,0 @@
-    /*
-     * double-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.d    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
-    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
-    trunc.w.d fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
deleted file mode 100644
index 459ab7e..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ /dev/null
@@ -1,50 +0,0 @@
-    /*
-     * double-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.d fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.d    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.d     fa1, fa1
-    c.ole.d   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixdfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%break
-
-#ifndef MIPS32REVGE6
-.L${opcode}_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.L${opcode}_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
deleted file mode 100644
index c3cd371..0000000
--- a/runtime/interpreter/mterp/mips/op_fill_array_data.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
-    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
-    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
-    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
-    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
deleted file mode 100644
index 9511578..0000000
--- a/runtime/interpreter/mterp/mips/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
-    move   a1, rPC
-    move   a2, rSELF
-    JAL($helper)                           #  v0 <- helper(shadow_frame, pc, self)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
deleted file mode 100644
index f8dcb0e..0000000
--- a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips/op_float_to_double.S b/runtime/interpreter/mterp/mips/op_float_to_double.S
deleted file mode 100644
index 1315255..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funopWider.S" {"instr":"cvt.d.s fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
deleted file mode 100644
index 26a0988..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ /dev/null
@@ -1,29 +0,0 @@
-    /*
-     * float-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.s    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
-    trunc.w.s fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
deleted file mode 100644
index b8f8efb..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ /dev/null
@@ -1,48 +0,0 @@
-    /*
-     * float-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.s fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.s    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.s     fa1, fa1
-    c.ole.s   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixsfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%break
-
-#ifndef MIPS32REVGE6
-.L${opcode}_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.L${opcode}_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto.S b/runtime/interpreter/mterp/mips/op_goto.S
deleted file mode 100644
index 57182a5..0000000
--- a/runtime/interpreter/mterp/mips/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sll       a0, rINST, 16                #  a0 <- AAxx0000
-    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_goto_16.S b/runtime/interpreter/mterp/mips/op_goto_16.S
deleted file mode 100644
index 06c96cd..0000000
--- a/runtime/interpreter/mterp/mips/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
deleted file mode 100644
index ef5bf6b..0000000
--- a/runtime/interpreter/mterp/mips/op_goto_32.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
-    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
-    b         MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_if_eq.S b/runtime/interpreter/mterp/mips/op_if_eq.S
deleted file mode 100644
index d6f9987..0000000
--- a/runtime/interpreter/mterp/mips/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_eqz.S b/runtime/interpreter/mterp/mips/op_if_eqz.S
deleted file mode 100644
index c52b76a..0000000
--- a/runtime/interpreter/mterp/mips/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ge.S b/runtime/interpreter/mterp/mips/op_if_ge.S
deleted file mode 100644
index bd06ff5..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gez.S b/runtime/interpreter/mterp/mips/op_if_gez.S
deleted file mode 100644
index 549231a..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gt.S b/runtime/interpreter/mterp/mips/op_if_gt.S
deleted file mode 100644
index 0be3091..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gtz.S b/runtime/interpreter/mterp/mips/op_if_gtz.S
deleted file mode 100644
index 5c7bcc4..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_le.S b/runtime/interpreter/mterp/mips/op_if_le.S
deleted file mode 100644
index c35c1a2..0000000
--- a/runtime/interpreter/mterp/mips/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lez.S b/runtime/interpreter/mterp/mips/op_if_lez.S
deleted file mode 100644
index 3dc6543..0000000
--- a/runtime/interpreter/mterp/mips/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lt.S b/runtime/interpreter/mterp/mips/op_if_lt.S
deleted file mode 100644
index 3f3386c..0000000
--- a/runtime/interpreter/mterp/mips/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ltz.S b/runtime/interpreter/mterp/mips/op_if_ltz.S
deleted file mode 100644
index e6d6ed6..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ne.S b/runtime/interpreter/mterp/mips/op_if_ne.S
deleted file mode 100644
index 3d7bf35..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_nez.S b/runtime/interpreter/mterp/mips/op_if_nez.S
deleted file mode 100644
index d121eae..0000000
--- a/runtime/interpreter/mterp/mips/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
deleted file mode 100644
index e218272..0000000
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "mips/field.S" { }
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
deleted file mode 100644
index f2ef68d..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
deleted file mode 100644
index f3032b3..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
deleted file mode 100644
index 0c8fb7c..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
deleted file mode 100644
index d93f844..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
deleted file mode 100644
index 69d04c4..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char_quick.S b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
deleted file mode 100644
index 6f6d608..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
deleted file mode 100644
index bea330a..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
deleted file mode 100644
index 95c34d7..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_object_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
-    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
-    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    GET_OPA4(a2)                           #  a2<- A+
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a3, MterpPossibleException        #  bail out
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
deleted file mode 100644
index 46277d3..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"lw" }
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    $load     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
deleted file mode 100644
index 357c791..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short_quick.S b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
deleted file mode 100644
index 899a0fe..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
deleted file mode 100644
index 885372a..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
deleted file mode 100644
index 128be57..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1                   #  t0 <- a3 + a1
-    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
deleted file mode 100644
index 706dcf3..0000000
--- a/runtime/interpreter/mterp/mips/op_instance_of.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC()
-    FETCH(a0, 1)                           # a0 <- CCCC
-    GET_OPB(a1)                            # a1 <- B
-    EAS2(a1, rFP, a1)                      # a1 <- &object
-    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
-    move  a3, rSELF                        # a3 <- self
-    GET_OPA4(rOBJ)                         # rOBJ <- A+
-    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       # load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             # advance rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
deleted file mode 100644
index 9266aab..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"SEB(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_char.S b/runtime/interpreter/mterp/mips/op_int_to_char.S
deleted file mode 100644
index 1b74a6e..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_double.S b/runtime/interpreter/mterp/mips/op_int_to_double.S
deleted file mode 100644
index 89484ce..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funopWider.S" {"instr":"cvt.d.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_float.S b/runtime/interpreter/mterp/mips/op_int_to_float.S
deleted file mode 100644
index d6f4b36..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funop.S" {"instr":"cvt.s.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_long.S b/runtime/interpreter/mterp/mips/op_int_to_long.S
deleted file mode 100644
index 9907463..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
deleted file mode 100644
index 8749cd8..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"SEH(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom.S b/runtime/interpreter/mterp/mips/op_invoke_custom.S
deleted file mode 100644
index f9241c4..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
deleted file mode 100644
index 862a614..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct.S b/runtime/interpreter/mterp/mips/op_invoke_direct.S
deleted file mode 100644
index 1ef198a..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
deleted file mode 100644
index af7477f..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface.S b/runtime/interpreter/mterp/mips/op_invoke_interface.S
deleted file mode 100644
index 80a485a..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_interface.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeInterface" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
deleted file mode 100644
index 8d725dc..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
deleted file mode 100644
index 85e01e7..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
deleted file mode 100644
index ce63978..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static.S b/runtime/interpreter/mterp/mips/op_invoke_static.S
deleted file mode 100644
index 46253cb..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_static.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static_range.S b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
deleted file mode 100644
index 96abafe..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super.S b/runtime/interpreter/mterp/mips/op_invoke_super.S
deleted file mode 100644
index 473951b..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_super.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeSuper" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super_range.S b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
deleted file mode 100644
index 963ff27..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual.S b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
deleted file mode 100644
index ea51e98..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtual" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
deleted file mode 100644
index 0c00091..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
deleted file mode 100644
index 82201e7..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
deleted file mode 100644
index c783675..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
deleted file mode 100644
index efbdfba..0000000
--- a/runtime/interpreter/mterp/mips/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "mips/field.S" { }
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean.S b/runtime/interpreter/mterp/mips/op_iput_boolean.S
deleted file mode 100644
index 55ac4ce..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
deleted file mode 100644
index 7d5caf6..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte.S b/runtime/interpreter/mterp/mips/op_iput_byte.S
deleted file mode 100644
index 61e489b..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
deleted file mode 100644
index 7d5caf6..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char.S b/runtime/interpreter/mterp/mips/op_iput_char.S
deleted file mode 100644
index 2caad1e..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char_quick.S b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
deleted file mode 100644
index 4bc84eb..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
deleted file mode 100644
index 6f7e7b7..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
deleted file mode 100644
index 82044f5..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_object_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* For: iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpIputObjectQuick)
-    beqz   v0, MterpException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
deleted file mode 100644
index d9753b1..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default { "store":"sw" }
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_short.S b/runtime/interpreter/mterp/mips/op_iput_short.S
deleted file mode 100644
index 414a15b..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_short_quick.S b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
deleted file mode 100644
index 4bc84eb..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
deleted file mode 100644
index fc862e4..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
deleted file mode 100644
index 0eb228d..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    GET_OPA4(a0)                           #  a0 <- A(+)
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
-    # check object for null
-    beqz      a2, common_errNullObject     #  object was null
-    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
-    FETCH(a3, 1)                           #  a3 <- field byte offset
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      a2, a2, a3                   #  obj.field (64 bits, aligned) <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
-    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
deleted file mode 100644
index 153f582..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_double.S
+++ /dev/null
@@ -1,20 +0,0 @@
-    /*
-     * long-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
deleted file mode 100644
index dd1ab81..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_float.S
+++ /dev/null
@@ -1,20 +0,0 @@
-    /*
-     * long-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdisf)
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/op_long_to_int.S b/runtime/interpreter/mterp/mips/op_long_to_int.S
deleted file mode 100644
index 949c180..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "mips/op_move.S"
diff --git a/runtime/interpreter/mterp/mips/op_monitor_enter.S b/runtime/interpreter/mterp/mips/op_monitor_enter.S
deleted file mode 100644
index 20d9029..0000000
--- a/runtime/interpreter/mterp/mips/op_monitor_enter.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_monitor_exit.S b/runtime/interpreter/mterp/mips/op_monitor_exit.S
deleted file mode 100644
index 1eadff9..0000000
--- a/runtime/interpreter/mterp/mips/op_monitor_exit.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
deleted file mode 100644
index 547ea3a..0000000
--- a/runtime/interpreter/mterp/mips/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
deleted file mode 100644
index 91b7399..0000000
--- a/runtime/interpreter/mterp/mips/op_move_16.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH(a1, 2)                           #  a1 <- BBBB
-    FETCH(a0, 1)                           #  a0 <- AAAA
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
-    .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
deleted file mode 100644
index f1bece7..0000000
--- a/runtime/interpreter/mterp/mips/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-exception vAA */
-    GET_OPA(a2)                                 #  a2 <- AA
-    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
-    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
-    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    JR(t0)                                      #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
deleted file mode 100644
index 90c25c9..0000000
--- a/runtime/interpreter/mterp/mips/op_move_from16.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH(a1, 1)                           #  a1 <- BBBB
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
-    .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_object.S b/runtime/interpreter/mterp/mips/op_move_object.S
deleted file mode 100644
index 9420ff3..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_16.S b/runtime/interpreter/mterp/mips/op_move_object_16.S
deleted file mode 100644
index d6454c2..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_from16.S b/runtime/interpreter/mterp/mips/op_move_object_from16.S
deleted file mode 100644
index db0aca1..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
deleted file mode 100644
index a4d5bfe..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    lw    a0, 0(a0)                        #  a0 <- result.i
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
-    .else
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
-    .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_result_object.S b/runtime/interpreter/mterp/mips/op_move_result_object.S
deleted file mode 100644
index fcbffee..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
deleted file mode 100644
index 1259218..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* move-result-wide vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
deleted file mode 100644
index 01d0949..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
deleted file mode 100644
index 587ba04..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 2)                           #  a3 <- BBBB
-    FETCH(a2, 1)                           #  a2 <- AAAA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
deleted file mode 100644
index 5003fbd..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 1)                           #  a3 <- BBBB
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_mul_double.S b/runtime/interpreter/mterp/mips/op_mul_double.S
deleted file mode 100644
index 44a473b..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
deleted file mode 100644
index 4e5c230..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float.S b/runtime/interpreter/mterp/mips/op_mul_float.S
deleted file mode 100644
index abc9390..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
deleted file mode 100644
index 2469109..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int.S b/runtime/interpreter/mterp/mips/op_mul_int.S
deleted file mode 100644
index 266823c..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
deleted file mode 100644
index b7dc5d3..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
deleted file mode 100644
index fb4c8ec..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
deleted file mode 100644
index 6d2e7de..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
deleted file mode 100644
index 74b049a..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_long.S
+++ /dev/null
@@ -1,42 +0,0 @@
-    /*
-     * Signed 64-bit integer multiply.
-     *         a1   a0
-     *   x     a3   a2
-     *   -------------
-     *       a2a1 a2a0
-     *       a3a0
-     *  a3a1 (<= unused)
-     *  ---------------
-     *         v1   v0
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       t0, a0, 255                  #  a2 <- BB
-    srl       t1, a0, 8                    #  a3 <- CC
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
-
-    EAS2(t1, rFP, t1)                      #  t0 <- &fp[CC]
-    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
-#endif
-    mul       t0, a2, a1                   #  t0= a2a1
-    addu      v1, v1, t1                   #  v1+= hi(a2a0)
-    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
-
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    b         .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
deleted file mode 100644
index 683b055..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-    /*
-     * See op_mul_long.S for more details
-     */
-    /* mul-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  vAA.low / high
-
-    GET_OPB(t1)                            #  t1 <- B
-    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
-    LOAD64(a2, a3, t1)                     #  vBB.low / high
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
- #endif
-    mul       t2, a2, a1                   #  t2= a2a1
-    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
-    addu      v1, v1, t2                   #  v1= v1 + a2a1;
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_neg_double.S b/runtime/interpreter/mterp/mips/op_neg_double.S
deleted file mode 100644
index 89cc918..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_float.S b/runtime/interpreter/mterp/mips/op_neg_float.S
deleted file mode 100644
index e702755..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_int.S b/runtime/interpreter/mterp/mips/op_neg_int.S
deleted file mode 100644
index 4461731..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_long.S b/runtime/interpreter/mterp/mips/op_neg_long.S
deleted file mode 100644
index 71e60f5..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_new_array.S b/runtime/interpreter/mterp/mips/op_new_array.S
deleted file mode 100644
index 4a6512d..0000000
--- a/runtime/interpreter/mterp/mips/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    move   a3, rSELF
-    JAL(MterpNewArray)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
deleted file mode 100644
index 3c9e83f..0000000
--- a/runtime/interpreter/mterp/mips/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rSELF
-    move   a2, rINST
-    JAL(MterpNewInstance)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_nop.S b/runtime/interpreter/mterp/mips/op_nop.S
deleted file mode 100644
index 3565631..0000000
--- a/runtime/interpreter/mterp/mips/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_not_int.S b/runtime/interpreter/mterp/mips/op_not_int.S
deleted file mode 100644
index 55d8cc1..0000000
--- a/runtime/interpreter/mterp/mips/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_not_long.S b/runtime/interpreter/mterp/mips/op_not_long.S
deleted file mode 100644
index 9e7c95b..0000000
--- a/runtime/interpreter/mterp/mips/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int.S b/runtime/interpreter/mterp/mips/op_or_int.S
deleted file mode 100644
index c7ce760..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_2addr.S b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
deleted file mode 100644
index 192d611..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit16.S b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
deleted file mode 100644
index f4ef75f..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit8.S b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
deleted file mode 100644
index f6212e2..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long.S b/runtime/interpreter/mterp/mips/op_or_long.S
deleted file mode 100644
index 0f94486..0000000
--- a/runtime/interpreter/mterp/mips/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long_2addr.S b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
deleted file mode 100644
index 43c3d05..0000000
--- a/runtime/interpreter/mterp/mips/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
deleted file mode 100644
index 0a1ff98..0000000
--- a/runtime/interpreter/mterp/mips/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_VREG(a1, a3)                       #  a1 <- vAA
-    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
-    JAL($func)                             #  a0 <- code-unit branch offset
-    move      rINST, v0
-    b         MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_rem_double.S b/runtime/interpreter/mterp/mips/op_rem_double.S
deleted file mode 100644
index a6890a8..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
deleted file mode 100644
index a24e160..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float.S b/runtime/interpreter/mterp/mips/op_rem_float.S
deleted file mode 100644
index ac3d50c..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
deleted file mode 100644
index 7f0a932..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_int.S b/runtime/interpreter/mterp/mips/op_rem_int.S
deleted file mode 100644
index c2a334a..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
deleted file mode 100644
index 46c353f..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
deleted file mode 100644
index 2894ad3..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
deleted file mode 100644
index 582248b..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_long.S b/runtime/interpreter/mterp/mips/op_rem_long.S
deleted file mode 100644
index e3eb19b..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
deleted file mode 100644
index 8fc9fdb..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_return.S b/runtime/interpreter/mterp/mips/op_return.S
deleted file mode 100644
index 44b9395..0000000
--- a/runtime/interpreter/mterp/mips/op_return.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    GET_VREG(v0, a2)                       #  v0 <- vAA
-    move      v1, zero
-    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_object.S b/runtime/interpreter/mterp/mips/op_return_object.S
deleted file mode 100644
index 7350e00..0000000
--- a/runtime/interpreter/mterp/mips/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_return.S"
diff --git a/runtime/interpreter/mterp/mips/op_return_void.S b/runtime/interpreter/mterp/mips/op_return_void.S
deleted file mode 100644
index 1f616ea..0000000
--- a/runtime/interpreter/mterp/mips/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move      v0, zero
-    move      v1, zero
-    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
deleted file mode 100644
index e670c28..0000000
--- a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
-    move   a0, rSELF
-    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz   ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move   v0, zero
-    move   v1, zero
-    b      MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_wide.S b/runtime/interpreter/mterp/mips/op_return_wide.S
deleted file mode 100644
index f0f679d..0000000
--- a/runtime/interpreter/mterp/mips/op_return_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
-    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
-    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int.S b/runtime/interpreter/mterp/mips/op_rsub_int.S
deleted file mode 100644
index f7e61bb..0000000
--- a/runtime/interpreter/mterp/mips/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
deleted file mode 100644
index 3968a5e..0000000
--- a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
deleted file mode 100644
index 92d6673..0000000
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "mips/field.S" { }
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
deleted file mode 100644
index 7a7012e..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
deleted file mode 100644
index a2f1dbf..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
deleted file mode 100644
index 07d4041..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
deleted file mode 100644
index 0a3c9ee..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
deleted file mode 100644
index 2960443..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
deleted file mode 100644
index be4ae02..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int.S b/runtime/interpreter/mterp/mips/op_shl_int.S
deleted file mode 100644
index 15cbe94..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
deleted file mode 100644
index ef9bd65..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
deleted file mode 100644
index d2afb53..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
deleted file mode 100644
index cc08112..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t2)                            #  t2 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
deleted file mode 100644
index 93c5783..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_int.S b/runtime/interpreter/mterp/mips/op_shr_int.S
deleted file mode 100644
index 61108399..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
deleted file mode 100644
index e00ff5b..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
deleted file mode 100644
index d058f58..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
deleted file mode 100644
index ea032fe..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t3)                            #  t3 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/VAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
deleted file mode 100644
index c805ea4..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    GET_OPA4(t2)                           #  t2 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sparse_switch.S b/runtime/interpreter/mterp/mips/op_sparse_switch.S
deleted file mode 100644
index 670f464..0000000
--- a/runtime/interpreter/mterp/mips/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
deleted file mode 100644
index c858679..0000000
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "mips/field.S" { }
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
deleted file mode 100644
index 0137430..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
deleted file mode 100644
index 5ae4256..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
deleted file mode 100644
index 83787a7..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_object.S b/runtime/interpreter/mterp/mips/op_sput_object.S
deleted file mode 100644
index 683b767..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
deleted file mode 100644
index df99b44..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
deleted file mode 100644
index 1d2ed19..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_double.S b/runtime/interpreter/mterp/mips/op_sub_double.S
deleted file mode 100644
index 9473218..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
deleted file mode 100644
index 7ce7c74..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float.S b/runtime/interpreter/mterp/mips/op_sub_float.S
deleted file mode 100644
index 04650d9..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
deleted file mode 100644
index dfe935c..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int.S b/runtime/interpreter/mterp/mips/op_sub_int.S
deleted file mode 100644
index 43da1b6..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
deleted file mode 100644
index cf34aa6..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_long.S b/runtime/interpreter/mterp/mips/op_sub_long.S
deleted file mode 100644
index 0f58e8e..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * For little endian the code sequence looks as follows:
- *    subu    v0,a0,a2
- *    subu    v1,a1,a3
- *    sltu    a0,a0,v0
- *    subu    v1,v1,a0
- */
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
deleted file mode 100644
index aa256c2..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * See op_sub_long.S for more details
- */
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_throw.S b/runtime/interpreter/mterp/mips/op_throw.S
deleted file mode 100644
index adc8b04..0000000
--- a/runtime/interpreter/mterp/mips/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC()                              #  exception handler can throw
-    GET_OPA(a2)                              #  a2 <- AA
-    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
-    # null object?
-    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
-    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
-    b         MterpException
diff --git a/runtime/interpreter/mterp/mips/op_unused_3e.S b/runtime/interpreter/mterp/mips/op_unused_3e.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_3f.S b/runtime/interpreter/mterp/mips/op_unused_3f.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_40.S b/runtime/interpreter/mterp/mips/op_unused_40.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_41.S b/runtime/interpreter/mterp/mips/op_unused_41.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_42.S b/runtime/interpreter/mterp/mips/op_unused_42.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_43.S b/runtime/interpreter/mterp/mips/op_unused_43.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_73.S b/runtime/interpreter/mterp/mips/op_unused_73.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_79.S b/runtime/interpreter/mterp/mips/op_unused_79.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_7a.S b/runtime/interpreter/mterp/mips/op_unused_7a.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f3.S b/runtime/interpreter/mterp/mips/op_unused_f3.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f4.S b/runtime/interpreter/mterp/mips/op_unused_f4.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f5.S b/runtime/interpreter/mterp/mips/op_unused_f5.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f6.S b/runtime/interpreter/mterp/mips/op_unused_f6.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f7.S b/runtime/interpreter/mterp/mips/op_unused_f7.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f8.S b/runtime/interpreter/mterp/mips/op_unused_f8.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f9.S b/runtime/interpreter/mterp/mips/op_unused_f9.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fc.S b/runtime/interpreter/mterp/mips/op_unused_fc.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fd.S b/runtime/interpreter/mterp/mips/op_unused_fd.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int.S b/runtime/interpreter/mterp/mips/op_ushr_int.S
deleted file mode 100644
index b95472b..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
deleted file mode 100644
index fc17778..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"srl a0, a0, a1 "}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
deleted file mode 100644
index c82cfba..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long.S b/runtime/interpreter/mterp/mips/op_ushr_long.S
deleted file mode 100644
index 2e227a9..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
deleted file mode 100644
index 9e93f34..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    GET_OPA4(t3)                           #  t3 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_xor_int.S b/runtime/interpreter/mterp/mips/op_xor_int.S
deleted file mode 100644
index 6c23f1f..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
deleted file mode 100644
index 5ee1667..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
deleted file mode 100644
index 2af37a6..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
deleted file mode 100644
index 944ed69..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long.S b/runtime/interpreter/mterp/mips/op_xor_long.S
deleted file mode 100644
index 93f8f70..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
deleted file mode 100644
index 49f3fa4..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/other.S b/runtime/interpreter/mterp/mips/other.S
new file mode 100644
index 0000000..5002329
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/other.S
@@ -0,0 +1,345 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@@BBBB */
+    .extern $helper
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- BBBB
+    GET_OPA(a1)                         # a1 <- AA
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL($helper)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(2)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+%def op_const():
+    /* const vAA, +BBBBbbbb */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+%def op_const_16():
+    /* const/16 vAA, +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+%def op_const_4():
+    /* const/4 vA, +B */
+    sll       a1, rINST, 16                #  a1 <- Bxxx0000
+    GET_OPA(a0)                            #  a0 <- A+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
+    and       a0, a0, 15
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, +BBBB0000 */
+    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a0, a0, 16                   #  a0 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, string@BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- bbbb (low)
+    FETCH(a2, 2)                        # a2 <- BBBB (high)
+    GET_OPA(a1)                         # a1 <- AA
+    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(3)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(3)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+%def op_const_wide():
+    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
+    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
+    FETCH(a3, 4)                           #  a3 <- HHHH (high)
+    GET_OPA(t1)                            #  t1 <- AA
+    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, +BBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, +BBBB000000000000 */
+    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    li        a0, 0                        #  a0 <- 00000000
+    sll       a1, 16                       #  a1 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
+
+%def op_monitor_enter():
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+%def op_monitor_exit():
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
+    .else
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
+    .endif
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
+    .else
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
+    .endif
+
+%def op_move_exception():
+    /* move-exception vAA */
+    GET_OPA(a2)                                 #  a2 <- AA
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
+    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
+    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
+    JR(t0)                                      #  jump to next instruction
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
+    .else
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
+    .endif
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    lw    a0, 0(a0)                        #  a0 <- result.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
+    .else
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
+    .endif
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* move-result-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 2)                           #  a3 <- BBBB
+    FETCH(a2, 1)                           #  a2 <- AAAA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 1)                           #  a3 <- BBBB
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
+
+%def op_nop():
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_73():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
deleted file mode 100644
index bc99263..0000000
--- a/runtime/interpreter/mterp/mips/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":"", "result0":"a0"}
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result0, t0, t1)        #  vA <- result0
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
deleted file mode 100644
index 0196e27..0000000
--- a/runtime/interpreter/mterp/mips/unopNarrower.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
-    /*
-     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: double-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    $load
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
deleted file mode 100644
index 135d9fa..0000000
--- a/runtime/interpreter/mterp/mips/unopWide.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1"}
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
deleted file mode 100644
index ca888ad..0000000
--- a/runtime/interpreter/mterp/mips/unopWider.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1"}
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result0/result1 = op a0".
-     *
-     * For: int-to-long
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unused.S b/runtime/interpreter/mterp/mips/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/mips/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/zcmp.S b/runtime/interpreter/mterp/mips/zcmp.S
deleted file mode 100644
index 8d3a198..0000000
--- a/runtime/interpreter/mterp/mips/zcmp.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    b${condition} a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/alt_stub.S b/runtime/interpreter/mterp/mips64/alt_stub.S
deleted file mode 100644
index 12fa84d..0000000
--- a/runtime/interpreter/mterp/mips64/alt_stub.S
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (${opnum} * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
diff --git a/runtime/interpreter/mterp/mips64/arithmetic.S b/runtime/interpreter/mterp/mips64/arithmetic.S
new file mode 100644
index 0000000..0b03e02
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/arithmetic.S
@@ -0,0 +1,458 @@
+%def binop(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a4                # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+%def binopWide(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE $result, a4           # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def binopWide2addr(preinstr="", result="a0", chkzero="0", instr=""):
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE $result, a2           # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def unop(preinstr="", instr=""):
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+    $preinstr                           # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $instr                              # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def unopWide(preinstr="", instr=""):
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: not-long, neg-long
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+    $preinstr                           # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $instr                              # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_add_int():
+%  binop(instr="addu a0, a0, a1")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="addu a0, a0, a1")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="addu a0, a0, a1")
+
+%def op_add_int_lit8():
+%  binopLit8(instr="addu a0, a0, a1")
+
+%def op_add_long():
+%  binopWide(instr="daddu a0, a0, a1")
+
+%def op_add_long_2addr():
+%  binopWide2addr(instr="daddu a0, a0, a1")
+
+%def op_and_int():
+%  binop(instr="and a0, a0, a1")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="and a0, a0, a1")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="and a0, a0, a1")
+
+%def op_and_int_lit8():
+%  binopLit8(instr="and a0, a0, a1")
+
+%def op_and_long():
+%  binopWide(instr="and a0, a0, a1")
+
+%def op_and_long_2addr():
+%  binopWide2addr(instr="and a0, a0, a1")
+
+%def op_cmp_long():
+    /* cmp-long vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    slt     a2, a0, a1
+    slt     a0, a1, a0
+    subu    a0, a0, a2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_div_int():
+%  binop(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_2addr():
+%  binop2addr(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_lit16():
+%  binopLit16(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_lit8():
+%  binopLit8(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_long():
+%  binopWide(instr="ddiv a0, a0, a1", chkzero="1")
+
+%def op_div_long_2addr():
+%  binopWide2addr(instr="ddiv a0, a0, a1", chkzero="1")
+
+%def op_int_to_byte():
+%  unop(instr="seb     a0, a0")
+
+%def op_int_to_char():
+%  unop(instr="and     a0, a0, 0xffff")
+
+%def op_int_to_long():
+    /* int-to-long vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_int_to_short():
+%  unop(instr="seh     a0, a0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+%  binop(instr="mul a0, a0, a1")
+
+%def op_mul_int_2addr():
+%  binop2addr(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit16():
+%  binopLit16(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit8():
+%  binopLit8(instr="mul a0, a0, a1")
+
+%def op_mul_long():
+%  binopWide(instr="dmul a0, a0, a1")
+
+%def op_mul_long_2addr():
+%  binopWide2addr(instr="dmul a0, a0, a1")
+
+%def op_neg_int():
+%  unop(instr="subu    a0, zero, a0")
+
+%def op_neg_long():
+%  unopWide(instr="dsubu   a0, zero, a0")
+
+%def op_not_int():
+%  unop(instr="nor     a0, zero, a0")
+
+%def op_not_long():
+%  unopWide(instr="nor     a0, zero, a0")
+
+%def op_or_int():
+%  binop(instr="or a0, a0, a1")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="or a0, a0, a1")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="or a0, a0, a1")
+
+%def op_or_int_lit8():
+%  binopLit8(instr="or a0, a0, a1")
+
+%def op_or_long():
+%  binopWide(instr="or a0, a0, a1")
+
+%def op_or_long_2addr():
+%  binopWide2addr(instr="or a0, a0, a1")
+
+%def op_rem_int():
+%  binop(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_2addr():
+%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_lit16():
+%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_lit8():
+%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_long():
+%  binopWide(instr="dmod a0, a0, a1", chkzero="1")
+
+%def op_rem_long_2addr():
+%  binopWide2addr(instr="dmod a0, a0, a1", chkzero="1")
+
+%def op_rsub_int():
+%  binopLit16(instr="subu a0, a1, a0")
+
+%def op_rsub_int_lit8():
+%  binopLit8(instr="subu a0, a1, a0")
+
+%def op_shl_int():
+%  binop(instr="sll a0, a0, a1")
+
+%def op_shl_int_2addr():
+%  binop2addr(instr="sll a0, a0, a1")
+
+%def op_shl_int_lit8():
+%  binopLit8(instr="sll a0, a0, a1")
+
+%def op_shl_long():
+%  binopWide(instr="dsll a0, a0, a1")
+
+%def op_shl_long_2addr():
+%  binopWide2addr(instr="dsll a0, a0, a1")
+
+%def op_shr_int():
+%  binop(instr="sra a0, a0, a1")
+
+%def op_shr_int_2addr():
+%  binop2addr(instr="sra a0, a0, a1")
+
+%def op_shr_int_lit8():
+%  binopLit8(instr="sra a0, a0, a1")
+
+%def op_shr_long():
+%  binopWide(instr="dsra a0, a0, a1")
+
+%def op_shr_long_2addr():
+%  binopWide2addr(instr="dsra a0, a0, a1")
+
+%def op_sub_int():
+%  binop(instr="subu a0, a0, a1")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="subu a0, a0, a1")
+
+%def op_sub_long():
+%  binopWide(instr="dsubu a0, a0, a1")
+
+%def op_sub_long_2addr():
+%  binopWide2addr(instr="dsubu a0, a0, a1")
+
+%def op_ushr_int():
+%  binop(instr="srl a0, a0, a1")
+
+%def op_ushr_int_2addr():
+%  binop2addr(instr="srl a0, a0, a1")
+
+%def op_ushr_int_lit8():
+%  binopLit8(instr="srl a0, a0, a1")
+
+%def op_ushr_long():
+%  binopWide(instr="dsrl a0, a0, a1")
+
+%def op_ushr_long_2addr():
+%  binopWide2addr(instr="dsrl a0, a0, a1")
+
+%def op_xor_int():
+%  binop(instr="xor a0, a0, a1")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit8():
+%  binopLit8(instr="xor a0, a0, a1")
+
+%def op_xor_long():
+%  binopWide(instr="xor a0, a0, a1")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(instr="xor a0, a0, a1")
diff --git a/runtime/interpreter/mterp/mips64/array.S b/runtime/interpreter/mterp/mips64/array.S
new file mode 100644
index 0000000..9d97f0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/array.S
@@ -0,0 +1,241 @@
+%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if $shift
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $load   a2, $data_offset(a0)        # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_aget_boolean():
+%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    .extern artAGetObjectFromMterp
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    jal     artAGetObjectFromMterp      # (array, index)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a4, rINST, 8                # a4 <- AA
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    SET_VREG_OBJECT v0, a4              # vAA <- v0
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_aget_short():
+%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     */
+    /* aget-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a2, a4                # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if $shift
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    $store  a2, $data_offset(a0)        # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_aput_boolean():
+%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     */
+    /* op vAA, vBB, vCC */
+    .extern MterpAputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpAputObject
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_aput_short():
+%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     */
+    /* aput-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    GET_VREG_WIDE a2, a4                # a2 <- vAA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    dsrl32  a2, a2, 0
+    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_array_length():
+    /*
+     * Return the length of an array.
+     */
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a0, common_errNullObject    # yup, fail
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a3, a2                     # vB <- length
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    .extern MterpFillArrayData
+    EXPORT_PC
+    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
+    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
+    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
+    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
+    jal     MterpFillArrayData          # (obj, payload)
+    beqzc   v0, MterpPossibleException  # exception?
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+    .extern $helper
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rSELF
+    jal     $helper
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class//CCCC */
+    .extern MterpNewArray
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpNewArray
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S
deleted file mode 100644
index c2bca91..0000000
--- a/runtime/interpreter/mterp/mips64/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binop.S b/runtime/interpreter/mterp/mips64/binop.S
deleted file mode 100644
index fab48b7..0000000
--- a/runtime/interpreter/mterp/mips64/binop.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a4                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binop2addr.S b/runtime/interpreter/mterp/mips64/binop2addr.S
deleted file mode 100644
index 1ae73f5..0000000
--- a/runtime/interpreter/mterp/mips64/binop2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopLit16.S b/runtime/interpreter/mterp/mips64/binopLit16.S
deleted file mode 100644
index 9257758..0000000
--- a/runtime/interpreter/mterp/mips64/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
diff --git a/runtime/interpreter/mterp/mips64/binopLit8.S b/runtime/interpreter/mterp/mips64/binopLit8.S
deleted file mode 100644
index f4a0bba..0000000
--- a/runtime/interpreter/mterp/mips64/binopLit8.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
diff --git a/runtime/interpreter/mterp/mips64/binopWide.S b/runtime/interpreter/mterp/mips64/binopWide.S
deleted file mode 100644
index 732f0d6..0000000
--- a/runtime/interpreter/mterp/mips64/binopWide.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a4           # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopWide2addr.S b/runtime/interpreter/mterp/mips64/binopWide2addr.S
deleted file mode 100644
index 45d8d82..0000000
--- a/runtime/interpreter/mterp/mips64/binopWide2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a2           # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/const.S b/runtime/interpreter/mterp/mips64/const.S
deleted file mode 100644
index 2ec1173..0000000
--- a/runtime/interpreter/mterp/mips64/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     $helper                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/control_flow.S b/runtime/interpreter/mterp/mips64/control_flow.S
new file mode 100644
index 0000000..457b938
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/control_flow.S
@@ -0,0 +1,217 @@
+%def bincmp(condition=""):
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
+    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
+    beqc    rPROFILE, v0, .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def zcmp(condition=""):
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
+    GET_VREG a0, a2                     # a0 <- vAA
+    b${condition}zc a0, MterpCommonTakenBranchNoFlags
+    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
+    beqc    rPROFILE, v0, .L_check_not_taken_osr
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_goto():
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    srl     rINST, rINST, 8
+    seb     rINST, rINST                # rINST <- offset (sign-extended AA)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended AAAA)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    lh      rINST, 2(rPC)               # rINST <- aaaa (low)
+    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
+    ins     rINST, a1, 16, 16           # rINST <- offset (sign-extended AAAAaaaa)
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+%  bincmp(condition="eq")
+
+%def op_if_eqz():
+%  zcmp(condition="eq")
+
+%def op_if_ge():
+%  bincmp(condition="ge")
+
+%def op_if_gez():
+%  zcmp(condition="ge")
+
+%def op_if_gt():
+%  bincmp(condition="gt")
+
+%def op_if_gtz():
+%  zcmp(condition="gt")
+
+%def op_if_le():
+%  bincmp(condition="le")
+
+%def op_if_lez():
+%  zcmp(condition="le")
+
+%def op_if_lt():
+%  bincmp(condition="lt")
+
+%def op_if_ltz():
+%  zcmp(condition="lt")
+
+%def op_if_ne():
+%  bincmp(condition="ne")
+
+%def op_if_nez():
+%  zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBBBBBB */
+    .extern $func
+    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
+    GET_VREG a1, a3                     # a1 <- vAA
+    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
+    jal     $func                       # v0 <- code-unit branch offset
+    move    rINST, v0
+    b       MterpCommonTakenBranchNoFlags
+
+%def op_return(instr="GET_VREG"):
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return (sign-extend), return-object (zero-extend)
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    $instr  a0, a2                      # a0 <- vAA
+    b       MterpReturn
+
+%def op_return_object():
+%  op_return(instr="GET_VREG_U")
+
+%def op_return_void():
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
+
+%def op_return_void_no_barrier():
+    .extern MterpSuspendCheck
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
+
+%def op_return_wide():
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vAA
+    b       MterpReturn
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
+    beqzc   a0, common_errNullObject
+    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
+    b       MterpException
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
deleted file mode 100644
index ed965aa..0000000
--- a/runtime/interpreter/mterp/mips64/entry.S
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
-    .set    reorder
-
-    .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
-    .balign 16
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-ExecuteMterpImpl:
-    .cfi_startproc
-    .cpsetup t9, t8, ExecuteMterpImpl
-
-    .cfi_def_cfa sp, 0
-    daddu   sp, sp, -STACK_SIZE
-    .cfi_adjust_cfa_offset STACK_SIZE
-
-    sd      t8, STACK_OFFSET_GP(sp)
-    .cfi_rel_offset 28, STACK_OFFSET_GP
-    sd      ra, STACK_OFFSET_RA(sp)
-    .cfi_rel_offset 31, STACK_OFFSET_RA
-
-    sd      s0, STACK_OFFSET_S0(sp)
-    .cfi_rel_offset 16, STACK_OFFSET_S0
-    sd      s1, STACK_OFFSET_S1(sp)
-    .cfi_rel_offset 17, STACK_OFFSET_S1
-    sd      s2, STACK_OFFSET_S2(sp)
-    .cfi_rel_offset 18, STACK_OFFSET_S2
-    sd      s3, STACK_OFFSET_S3(sp)
-    .cfi_rel_offset 19, STACK_OFFSET_S3
-    sd      s4, STACK_OFFSET_S4(sp)
-    .cfi_rel_offset 20, STACK_OFFSET_S4
-    sd      s5, STACK_OFFSET_S5(sp)
-    .cfi_rel_offset 21, STACK_OFFSET_S5
-    sd      s6, STACK_OFFSET_S6(sp)
-    .cfi_rel_offset 22, STACK_OFFSET_S6
-
-    /* Remember the return register */
-    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sd      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
-    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    dlsa    rREFS, v0, rFP, 2
-    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
-    dlsa    rPC, v0, a1, 1
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* Set up for backwards branches & osr profiling */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    jal     MterpSetUpHotnessCountdown
-    move    rPROFILE, v0                # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips64/fallback.S b/runtime/interpreter/mterp/mips64/fallback.S
deleted file mode 100644
index 560b994..0000000
--- a/runtime/interpreter/mterp/mips64/fallback.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    b       MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/fbinop.S b/runtime/interpreter/mterp/mips64/fbinop.S
deleted file mode 100644
index f19dd1c..0000000
--- a/runtime/interpreter/mterp/mips64/fbinop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {}
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinop2addr.S b/runtime/interpreter/mterp/mips64/fbinop2addr.S
deleted file mode 100644
index 2e2cd7e..0000000
--- a/runtime/interpreter/mterp/mips64/fbinop2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {}
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide.S b/runtime/interpreter/mterp/mips64/fbinopWide.S
deleted file mode 100644
index 8915c94..0000000
--- a/runtime/interpreter/mterp/mips64/fbinopWide.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {}
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
deleted file mode 100644
index a3f4eaa..0000000
--- a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {}
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmp.S b/runtime/interpreter/mterp/mips64/fcmp.S
deleted file mode 100644
index 2e1a3e4..0000000
--- a/runtime/interpreter/mterp/mips64/fcmp.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    cmp.eq.s f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.s f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmpWide.S b/runtime/interpreter/mterp/mips64/fcmpWide.S
deleted file mode 100644
index 2a3a341..0000000
--- a/runtime/interpreter/mterp/mips64/fcmpWide.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    cmp.eq.d f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.d f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtFooter.S b/runtime/interpreter/mterp/mips64/fcvtFooter.S
deleted file mode 100644
index 06e9507..0000000
--- a/runtime/interpreter/mterp/mips64/fcvtFooter.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG$suffix $valreg, a1
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtHeader.S b/runtime/interpreter/mterp/mips64/fcvtHeader.S
deleted file mode 100644
index 8742e42..0000000
--- a/runtime/interpreter/mterp/mips64/fcvtHeader.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG$suffix $valreg, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips64/field.S b/runtime/interpreter/mterp/mips64/field.S
deleted file mode 100644
index 1333ed7..0000000
--- a/runtime/interpreter/mterp/mips64/field.S
+++ /dev/null
@@ -1 +0,0 @@
-TODO
diff --git a/runtime/interpreter/mterp/mips64/floating_point.S b/runtime/interpreter/mterp/mips64/floating_point.S
new file mode 100644
index 0000000..1132a09
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/floating_point.S
@@ -0,0 +1,382 @@
+%def fbinop(instr=""):
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fbinop2addr(instr=""):
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fbinopWide(instr=""):
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fbinopWide2addr(instr=""):
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fcmp(gt_bias=""):
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    cmp.eq.s f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.s f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.s f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fcmpWide(gt_bias=""):
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    cmp.eq.d f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.d f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.d f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fcvtFooter(suffix="", valreg=""):
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files
+     * and in those files its contents appear as a copy.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG$suffix $valreg, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def fcvtHeader(suffix="", valreg=""):
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG$suffix $valreg, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+%def op_add_double():
+%  fbinopWide(instr="add.d f0, f0, f1")
+
+%def op_add_double_2addr():
+%  fbinopWide2addr(instr="add.d f0, f0, f1")
+
+%def op_add_float():
+%  fbinop(instr="add.s f0, f0, f1")
+
+%def op_add_float_2addr():
+%  fbinop2addr(instr="add.s f0, f0, f1")
+
+%def op_cmpg_double():
+%  fcmpWide(gt_bias="1")
+
+%def op_cmpg_float():
+%  fcmp(gt_bias="1")
+
+%def op_cmpl_double():
+%  fcmpWide(gt_bias="0")
+
+%def op_cmpl_float():
+%  fcmp(gt_bias="0")
+
+%def op_div_double():
+%  fbinopWide(instr="div.d f0, f0, f1")
+
+%def op_div_double_2addr():
+%  fbinopWide2addr(instr="div.d f0, f0, f1")
+
+%def op_div_float():
+%  fbinop(instr="div.s f0, f0, f1")
+
+%def op_div_float_2addr():
+%  fbinop2addr(instr="div.s f0, f0, f1")
+
+%def op_double_to_float():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    cvt.s.d f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_double_to_int():
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    trunc.w.d f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_double_to_long():
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    trunc.l.d f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_float_to_double():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    cvt.d.s f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_float_to_int():
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    trunc.w.s f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_float_to_long():
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    trunc.l.s f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_int_to_double():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    cvt.d.w f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_int_to_float():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    cvt.s.w f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_long_to_double():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    cvt.d.l f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_long_to_float():
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    cvt.s.l f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_mul_double():
+%  fbinopWide(instr="mul.d f0, f0, f1")
+
+%def op_mul_double_2addr():
+%  fbinopWide2addr(instr="mul.d f0, f0, f1")
+
+%def op_mul_float():
+%  fbinop(instr="mul.s f0, f0, f1")
+
+%def op_mul_float_2addr():
+%  fbinop2addr(instr="mul.s f0, f0, f1")
+
+%def op_neg_double():
+%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
+    neg.d   f0, f0
+%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_neg_float():
+%  fcvtHeader(suffix="_FLOAT", valreg="f0")
+    neg.s   f0, f0
+%  fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_rem_double():
+    /* rem-double vAA, vBB, vCC */
+    .extern fmod
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
+    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
+    jal     fmod                        # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_rem_double_2addr():
+    /* rem-double/2addr vA, vB */
+    .extern fmod
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f12, a2             # f12 <- vA
+    GET_VREG_DOUBLE f13, a3             # f13 <- vB
+    jal     fmod                        # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_rem_float():
+    /* rem-float vAA, vBB, vCC */
+    .extern fmodf
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f12, a2              # f12 <- vBB
+    GET_VREG_FLOAT f13, a3              # f13 <- vCC
+    jal     fmodf                       # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_rem_float_2addr():
+    /* rem-float/2addr vA, vB */
+    .extern fmodf
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f12, a2              # f12 <- vA
+    GET_VREG_FLOAT f13, a3              # f13 <- vB
+    jal     fmodf                       # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_sub_double():
+%  fbinopWide(instr="sub.d f0, f0, f1")
+
+%def op_sub_double_2addr():
+%  fbinopWide2addr(instr="sub.d f0, f0, f1")
+
+%def op_sub_float():
+%  fbinop(instr="sub.s f0, f0, f1")
+
+%def op_sub_float_2addr():
+%  fbinop2addr(instr="sub.s f0, f0, f1")
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
deleted file mode 100644
index 779b1fb..0000000
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
-    .extern MterpLogDivideByZeroException
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogDivideByZeroException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogArrayIndexException
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogArrayIndexException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogNullObjectException
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogNullObjectException
-#endif
-    b       MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-    .extern MterpHandleException
-    .extern MterpShouldSwitchInterpreters
-MterpException:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpHandleException                    # (self, shadow_frame)
-    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
-    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lwu     a1, OFF_FP_DEX_PC(rFP)
-    REFRESH_IBASE
-    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-    li      v0, JIT_CHECK_OSR
-    beqc    rPROFILE, v0, .L_osr_check
-    bltc    rPROFILE, v0, .L_resume_backward_branch
-    dsubu   rPROFILE, 1
-    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnezc   ra, .L_suspend_request_pending
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    move    a0, rSELF
-    jal     MterpSuspendCheck           # (self)
-    bnezc   v0, MterpFallback
-    REFRESH_IBASE                       # might have changed during suspend
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_no_count_backwards:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bnec    rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beqc    rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    ld      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2 
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST                               # rINST contains offset
-    jal     MterpLogOSR
-#endif
-    li      v0, 1                                   # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-    .extern MterpLogFallback
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogFallback
-#endif
-MterpCommonFallback:
-    li      v0, 0                                   # signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA.  Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                                   # signal return to caller.
-    b       MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
-    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sd      a0, 0(a2)
-    li      v0, 1                                   # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-    ld      s6, STACK_OFFSET_S6(sp)
-    .cfi_restore 22
-    ld      s5, STACK_OFFSET_S5(sp)
-    .cfi_restore 21
-    ld      s4, STACK_OFFSET_S4(sp)
-    .cfi_restore 20
-    ld      s3, STACK_OFFSET_S3(sp)
-    .cfi_restore 19
-    ld      s2, STACK_OFFSET_S2(sp)
-    .cfi_restore 18
-    ld      s1, STACK_OFFSET_S1(sp)
-    .cfi_restore 17
-    ld      s0, STACK_OFFSET_S0(sp)
-    .cfi_restore 16
-
-    ld      ra, STACK_OFFSET_RA(sp)
-    .cfi_restore 31
-
-    ld      t8, STACK_OFFSET_GP(sp)
-    .cpreturn
-    .cfi_restore 28
-
-    .set    noreorder
-    jr      ra
-    daddu   sp, sp, STACK_SIZE
-    .cfi_adjust_cfa_offset -STACK_SIZE
-
-    .cfi_endproc
-    .set    reorder
-    .size ExecuteMterpImpl, .-ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
deleted file mode 100644
index 7e1446c..0000000
--- a/runtime/interpreter/mterp/mips64/header.S
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $$0  /* always zero */
-#define AT   $$at /* assembler temp */
-#define v0   $$2  /* return value */
-#define v1   $$3
-#define a0   $$4  /* argument registers */
-#define a1   $$5
-#define a2   $$6
-#define a3   $$7
-#define a4   $$8  /* expanded register arguments */
-#define a5   $$9
-#define a6   $$10
-#define a7   $$11
-#define ta0  $$8  /* alias */
-#define ta1  $$9
-#define ta2  $$10
-#define ta3  $$11
-#define t0   $$12 /* temp registers (not saved across subroutine calls) */
-#define t1   $$13
-#define t2   $$14
-#define t3   $$15
-
-#define s0   $$16 /* saved across subroutine calls (callee saved) */
-#define s1   $$17
-#define s2   $$18
-#define s3   $$19
-#define s4   $$20
-#define s5   $$21
-#define s6   $$22
-#define s7   $$23
-#define t8   $$24 /* two more temp registers */
-#define t9   $$25
-#define k0   $$26 /* kernel temporary */
-#define k1   $$27
-#define gp   $$28 /* global pointer */
-#define sp   $$29 /* stack pointer */
-#define s8   $$30 /* one more callee saved */
-#define ra   $$31 /* return address */
-
-#define f0   $$f0
-#define f1   $$f1
-#define f2   $$f2
-#define f3   $$f3
-#define f12  $$f12
-#define f13  $$f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
-    bgec    \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
-    bltc    \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  s0  rPC       interpreted program counter, used for fetching instructions
-  s1  rFP       interpreted frame pointer, used for accessing locals and args
-  s2  rSELF     self (Thread) pointer
-  s3  rINST     first 16-bit code unit of current instruction
-  s4  rIBASE    interpreted instruction base pointer, used for computed goto
-  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  s6  rPROFILE  jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      s0
-#define CFI_DEX  16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP  4   // DWARF register number of the first argument register (a0).
-#define rFP      s1
-#define rSELF    s2
-#define rINST    s3
-#define rIBASE   s4
-#define rREFS    s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    lhu     rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-    daddu   rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    daddu   rPC, rPC, \reg
-    FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ADVANCE \count
-    FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    lhu     rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
-    .set noat
-    sll     AT, \reg, 7
-    daddu   AT, rIBASE, AT
-    jic     AT, 0
-    .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_U reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwu     \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    lw      AT, 4(AT)
-    dinsu   \reg, AT, 32, 32
-    .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    lw      AT, 4(AT)
-    mthc1   AT, \reg
-    .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    drotr32 \reg, \reg, 0
-    sw      \reg, 4(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    mfhc1   \vreg, \reg
-    sw      \vreg, 4(AT)
-    .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE      80    /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN             0x80000000
-#define INT_MIN_AS_FLOAT    0xCF000000
-#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
-#define LONG_MIN            0x8000000000000000
-#define LONG_MIN_AS_FLOAT   0xDF000000
-#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
diff --git a/runtime/interpreter/mterp/mips64/instruction_end.S b/runtime/interpreter/mterp/mips64/instruction_end.S
deleted file mode 100644
index 32c725c..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_end_alt.S b/runtime/interpreter/mterp/mips64/instruction_end_alt.S
deleted file mode 100644
index f90916f..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end_alt.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_end_sister.S b/runtime/interpreter/mterp/mips64/instruction_end_sister.S
deleted file mode 100644
index c5f4886..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end_sister.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_start.S b/runtime/interpreter/mterp/mips64/instruction_start.S
deleted file mode 100644
index 8874c20..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/mips64/instruction_start_alt.S b/runtime/interpreter/mterp/mips64/instruction_start_alt.S
deleted file mode 100644
index 0c9ffdb..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start_alt.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/mips64/instruction_start_sister.S b/runtime/interpreter/mterp/mips64/instruction_start_sister.S
deleted file mode 100644
index 2ec51f7..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
index be647b6..c2967cf 100644
--- a/runtime/interpreter/mterp/mips64/invoke.S
+++ b/runtime/interpreter/mterp/mips64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
     /*
      * Generic invoke handler wrapper.
      */
@@ -18,3 +18,93 @@
     bnezc   v0, MterpFallback
     GET_INST_OPCODE v0
     GOTO_OPCODE v0
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    .extern MterpShouldSwitchInterpreters
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     $helper
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 4
+    jal     MterpShouldSwitchInterpreters
+    bnezc   v0, MterpFallback
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips64/invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
deleted file mode 100644
index fa82083..0000000
--- a/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     $helper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 4
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
diff --git a/runtime/interpreter/mterp/mips64/main.S b/runtime/interpreter/mterp/mips64/main.S
new file mode 100644
index 0000000..92bddb0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/main.S
@@ -0,0 +1,753 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define zero $$0  /* always zero */
+#define AT   $$at /* assembler temp */
+#define v0   $$2  /* return value */
+#define v1   $$3
+#define a0   $$4  /* argument registers */
+#define a1   $$5
+#define a2   $$6
+#define a3   $$7
+#define a4   $$8  /* expanded register arguments */
+#define a5   $$9
+#define a6   $$10
+#define a7   $$11
+#define ta0  $$8  /* alias */
+#define ta1  $$9
+#define ta2  $$10
+#define ta3  $$11
+#define t0   $$12 /* temp registers (not saved across subroutine calls) */
+#define t1   $$13
+#define t2   $$14
+#define t3   $$15
+
+#define s0   $$16 /* saved across subroutine calls (callee saved) */
+#define s1   $$17
+#define s2   $$18
+#define s3   $$19
+#define s4   $$20
+#define s5   $$21
+#define s6   $$22
+#define s7   $$23
+#define t8   $$24 /* two more temp registers */
+#define t9   $$25
+#define k0   $$26 /* kernel temporary */
+#define k1   $$27
+#define gp   $$28 /* global pointer */
+#define sp   $$29 /* stack pointer */
+#define s8   $$30 /* one more callee saved */
+#define ra   $$31 /* return address */
+
+#define f0   $$f0
+#define f1   $$f1
+#define f2   $$f2
+#define f3   $$f3
+#define f12  $$f12
+#define f13  $$f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+    bgec    \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+    bltc    \rreg, \lreg, \target
+.endm
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  s0  rPC       interpreted program counter, used for fetching instructions
+  s1  rFP       interpreted frame pointer, used for accessing locals and args
+  s2  rSELF     self (Thread) pointer
+  s3  rINST     first 16-bit code unit of current instruction
+  s4  rIBASE    interpreted instruction base pointer, used for computed goto
+  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
+  s6  rPROFILE  jit profile hotness countdown
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC      s0
+#define CFI_DEX  16  // DWARF register number of the register holding dex-pc (s0).
+#define CFI_TMP  4   // DWARF register number of the first argument register (a0).
+#define rFP      s1
+#define rSELF    s2
+#define rINST    s3
+#define rIBASE   s4
+#define rREFS    s5
+#define rPROFILE s6
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, for the benefit of future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    lhu     rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+    daddu   rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg and advance
+ * rPC to point to the next instruction.  "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
+ *
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+    daddu   rPC, rPC, \reg
+    FETCH_INST
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+    ADVANCE \count
+    FETCH_INST
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+    lhu     rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+    and     \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+    .set noat
+    sll     AT, \reg, 7
+    daddu   AT, rIBASE, AT
+    jic     AT, 0
+    .set at
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    .set at
+.endm
+.macro GET_VREG_U reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwu     \reg, 0(AT)
+    .set at
+.endm
+.macro GET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    lw      AT, 4(AT)
+    dinsu   \reg, AT, 32, 32
+    .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    lw      AT, 4(AT)
+    mthc1   AT, \reg
+    .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    drotr32 \reg, \reg, 0
+    sw      \reg, 4(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    mfhc1   \vreg, \reg
+    sw      \vreg, 4(AT)
+    .set at
+.endm
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_OFFSET_S6 64
+#define STACK_SIZE      80    /* needs 16 byte alignment */
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN             0x80000000
+#define INT_MIN_AS_FLOAT    0xCF000000
+#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
+#define LONG_MIN            0x8000000000000000
+#define LONG_MIN_AS_FLOAT   0xDF000000
+#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+    .set    reorder
+
+    .text
+    .global ExecuteMterpImpl
+    .type   ExecuteMterpImpl, %function
+    .balign 16
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  dex_instructions
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+    .cfi_startproc
+    .cpsetup t9, t8, ExecuteMterpImpl
+
+    .cfi_def_cfa sp, 0
+    daddu   sp, sp, -STACK_SIZE
+    .cfi_adjust_cfa_offset STACK_SIZE
+
+    sd      t8, STACK_OFFSET_GP(sp)
+    .cfi_rel_offset 28, STACK_OFFSET_GP
+    sd      ra, STACK_OFFSET_RA(sp)
+    .cfi_rel_offset 31, STACK_OFFSET_RA
+
+    sd      s0, STACK_OFFSET_S0(sp)
+    .cfi_rel_offset 16, STACK_OFFSET_S0
+    sd      s1, STACK_OFFSET_S1(sp)
+    .cfi_rel_offset 17, STACK_OFFSET_S1
+    sd      s2, STACK_OFFSET_S2(sp)
+    .cfi_rel_offset 18, STACK_OFFSET_S2
+    sd      s3, STACK_OFFSET_S3(sp)
+    .cfi_rel_offset 19, STACK_OFFSET_S3
+    sd      s4, STACK_OFFSET_S4(sp)
+    .cfi_rel_offset 20, STACK_OFFSET_S4
+    sd      s5, STACK_OFFSET_S5(sp)
+    .cfi_rel_offset 21, STACK_OFFSET_S5
+    sd      s6, STACK_OFFSET_S6(sp)
+    .cfi_rel_offset 22, STACK_OFFSET_S6
+
+    /* Remember the return register */
+    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the dex instruction pointer */
+    sd      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
+    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    dlsa    rREFS, v0, rFP, 2
+    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+    dlsa    rPC, v0, a1, 1
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+    EXPORT_PC
+
+    /* Starting ibase */
+    REFRESH_IBASE
+
+    /* Set up for backwards branches & osr profiling */
+    ld      a0, OFF_FP_METHOD(rFP)
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rSELF
+    jal     MterpSetUpHotnessCountdown
+    move    rPROFILE, v0                # Starting hotness countdown to rPROFILE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /* NOTE: no fallthrough */
+
+%def alt_stub():
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    daddu   ra, ra, (${opnum} * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+%def helpers():
+%  pass
+
+%def footer():
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+    .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogDivideByZeroException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogArrayIndexException
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogArrayIndexException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogNullObjectException
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogNullObjectException
+#endif
+    b       MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+    .extern MterpHandleException
+    .extern MterpShouldSwitchInterpreters
+MterpException:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpHandleException                    # (self, shadow_frame)
+    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
+    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
+    lwu     a1, OFF_FP_DEX_PC(rFP)
+    REFRESH_IBASE
+    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    jal     MterpShouldSwitchInterpreters
+    bnezc   v0, MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+    /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    rINST          <= signed offset
+ *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+    li      v0, JIT_CHECK_OSR
+    beqc    rPROFILE, v0, .L_osr_check
+    bltc    rPROFILE, v0, .L_resume_backward_branch
+    dsubu   rPROFILE, 1
+    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
+.L_resume_backward_branch:
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    REFRESH_IBASE
+    daddu   a2, rINST, rINST            # a2<- byte offset
+    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+    bnezc   ra, .L_suspend_request_pending
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+.L_suspend_request_pending:
+    EXPORT_PC
+    move    a0, rSELF
+    jal     MterpSuspendCheck           # (self)
+    bnezc   v0, MterpFallback
+    REFRESH_IBASE                       # might have changed during suspend
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+.L_no_count_backwards:
+    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
+    bnec    rPROFILE, v0, .L_resume_backward_branch
+.L_osr_check:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    EXPORT_PC
+    jal MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
+    bnezc   v0, MterpOnStackReplacement
+    b       .L_resume_backward_branch
+
+.L_forward_branch:
+    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
+    beqc    rPROFILE, v0, .L_check_osr_forward
+.L_resume_forward_branch:
+    daddu   a2, rINST, rINST            # a2<- byte offset
+    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+.L_check_osr_forward:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    EXPORT_PC
+    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
+    bnezc   v0, MterpOnStackReplacement
+    b       .L_resume_forward_branch
+
+.L_add_batch:
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+    ld      a0, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
+    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
+    b       .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    li      a2, 2
+    EXPORT_PC
+    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
+    bnezc   v0, MterpOnStackReplacement
+    FETCH_ADVANCE_INST 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST                               # rINST contains offset
+    jal     MterpLogOSR
+#endif
+    li      v0, 1                                   # Signal normal return
+    b       MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+    .extern MterpLogFallback
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogFallback
+#endif
+MterpCommonFallback:
+    li      v0, 0                                   # signal retry with reference interpreter.
+    b       MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore SP, restore the registers, and then restore
+ * RA to PC.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                                   # signal return to caller.
+    b       MterpDone
+/*
+ * Returned value is expected in a0 and if it's not 64-bit, the 32 most
+ * significant bits of a0 must be zero-extended or sign-extended
+ * depending on the return type.
+ */
+MterpReturn:
+    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
+    sd      a0, 0(a2)
+    li      v0, 1                                   # signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
+
+MterpProfileActive:
+    move    rINST, v0                   # stash return value
+    /* Report cached hotness counts */
+    ld      a0, OFF_FP_METHOD(rFP)
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rSELF
+    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
+    move    v0, rINST                   # restore return value
+
+.L_pop_and_return:
+    ld      s6, STACK_OFFSET_S6(sp)
+    .cfi_restore 22
+    ld      s5, STACK_OFFSET_S5(sp)
+    .cfi_restore 21
+    ld      s4, STACK_OFFSET_S4(sp)
+    .cfi_restore 20
+    ld      s3, STACK_OFFSET_S3(sp)
+    .cfi_restore 19
+    ld      s2, STACK_OFFSET_S2(sp)
+    .cfi_restore 18
+    ld      s1, STACK_OFFSET_S1(sp)
+    .cfi_restore 17
+    ld      s0, STACK_OFFSET_S0(sp)
+    .cfi_restore 16
+
+    ld      ra, STACK_OFFSET_RA(sp)
+    .cfi_restore 31
+
+    ld      t8, STACK_OFFSET_GP(sp)
+    .cpreturn
+    .cfi_restore 28
+
+    .set    noreorder
+    jr      ra
+    daddu   sp, sp, STACK_SIZE
+    .cfi_adjust_cfa_offset -STACK_SIZE
+
+    .cfi_endproc
+    .set    reorder
+    .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+%def instruction_end():
+
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_end_alt():
+
+    .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+
+%def instruction_start():
+
+    .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+%def instruction_start_alt():
+
+    .global artMterpAsmAltInstructionStart
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+    .text
+
+%def opcode_start():
+%  pass
+%def opcode_end():
+%  pass
diff --git a/runtime/interpreter/mterp/mips64/object.S b/runtime/interpreter/mterp/mips64/object.S
new file mode 100644
index 0000000..a5a2b3d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/object.S
@@ -0,0 +1,262 @@
+%def field(helper=""):
+TODO
+
+%def op_check_cast():
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class//BBBB */
+    .extern MterpCheckCast
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpCheckCast              # (index, &obj, method, self)
+    PREFETCH_INST 2
+    bnez    v0, MterpPossibleException
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+%  field(helper=helper)
+
+%def op_iget_boolean():
+%  op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="lbu")
+
+%def op_iget_byte():
+%  op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="lb")
+
+%def op_iget_char():
+%  op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="lhu")
+
+%def op_iget_object():
+%  op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset//CCCC */
+    .extern artIGetObjectFromMterp
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- object we're operating on
+    jal     artIGetObjectFromMterp      # (obj, offset)
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a3, MterpPossibleException  # bail out
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iget_quick(load="lw"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3
+    beqz    a3, common_errNullObject    # object was null
+    $load   a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iget_short():
+%  op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="lh")
+
+%def op_iget_wide():
+%  op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+    /* iget-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a4, 2(rPC)                  # a4 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    daddu   a4, a3, a4                  # create direct pointer
+    lw      a0, 0(a4)
+    lw      a1, 4(a4)
+    dinsu   a0, a1, 32, 32
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG_WIDE a0, a2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_instance_of():
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class//CCCC */
+    .extern MterpInstanceOf
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpInstanceOf             # (index, &obj, method, self)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    ADVANCE 2                           # advance rPC
+    SET_VREG v0, a2                     # vA <- v0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(store="sb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(store="sb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(store="sh")
+
+%def op_iput_object():
+%  op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    .extern MterpIputObjectQuick
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpIputObjectQuick
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iput_quick(store="sw"):
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3
+    $store  a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(store="sh")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a3, 2(rPC)                  # a3 <- field byte offset
+    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
+    ext     a0, rINST, 8, 4             # a0 <- A
+    beqz    a2, common_errNullObject    # object was null
+    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a2, a3                  # create a direct pointer
+    sw      a0, 0(a1)
+    dsrl32  a0, a0, 0
+    sw      a0, 4(a1)
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_new_instance():
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class//BBBB */
+    .extern MterpNewInstance
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rSELF
+    move    a2, rINST
+    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips64/op_add_double.S b/runtime/interpreter/mterp/mips64/op_add_double.S
deleted file mode 100644
index 1520e32..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
deleted file mode 100644
index c14382e..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float.S b/runtime/interpreter/mterp/mips64/op_add_float.S
deleted file mode 100644
index c6ed558..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
deleted file mode 100644
index 4c20547..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int.S b/runtime/interpreter/mterp/mips64/op_add_int.S
deleted file mode 100644
index 6e569de..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
deleted file mode 100644
index 2a84124..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
deleted file mode 100644
index 94b053b..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
deleted file mode 100644
index 3b6d734..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long.S b/runtime/interpreter/mterp/mips64/op_add_long.S
deleted file mode 100644
index c8d702f..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
deleted file mode 100644
index 928ff54..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aget.S b/runtime/interpreter/mterp/mips64/op_aget.S
deleted file mode 100644
index 0472a06..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $load   a2, $data_offset(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_boolean.S b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
deleted file mode 100644
index d5be01b..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_byte.S b/runtime/interpreter/mterp/mips64/op_aget_byte.S
deleted file mode 100644
index 084de8d..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_char.S b/runtime/interpreter/mterp/mips64/op_aget_char.S
deleted file mode 100644
index 6c99ed5..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_object.S b/runtime/interpreter/mterp/mips64/op_aget_object.S
deleted file mode 100644
index 6374a05..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_object.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    .extern artAGetObjectFromMterp
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    jal     artAGetObjectFromMterp      # (array, index)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    srl     a4, rINST, 8                # a4 <- AA
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    SET_VREG_OBJECT v0, a4              # vAA <- v0
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_short.S b/runtime/interpreter/mterp/mips64/op_aget_short.S
deleted file mode 100644
index 0158b0a..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_wide.S b/runtime/interpreter/mterp/mips64/op_aget_wide.S
deleted file mode 100644
index 0945aca..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
-    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a2, a4                # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_and_int.S b/runtime/interpreter/mterp/mips64/op_and_int.S
deleted file mode 100644
index f0792a8..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
deleted file mode 100644
index 08dc615..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
deleted file mode 100644
index 65d28ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
deleted file mode 100644
index ab84bb7..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long.S b/runtime/interpreter/mterp/mips64/op_and_long.S
deleted file mode 100644
index e383ba0..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
deleted file mode 100644
index f863bb9..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aput.S b/runtime/interpreter/mterp/mips64/op_aput.S
deleted file mode 100644
index 9bfda97..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    $store  a2, $data_offset(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_boolean.S b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
deleted file mode 100644
index 6707a1f..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_byte.S b/runtime/interpreter/mterp/mips64/op_aput_byte.S
deleted file mode 100644
index 7b9ce48..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_char.S b/runtime/interpreter/mterp/mips64/op_aput_char.S
deleted file mode 100644
index 82bc8f7..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_object.S b/runtime/interpreter/mterp/mips64/op_aput_object.S
deleted file mode 100644
index b132456..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    .extern MterpAputObject
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpAputObject
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_short.S b/runtime/interpreter/mterp/mips64/op_aput_short.S
deleted file mode 100644
index a7af294..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_wide.S b/runtime/interpreter/mterp/mips64/op_aput_wide.S
deleted file mode 100644
index a1d7a3b..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    GET_VREG_WIDE a2, a4                # a2 <- vAA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    dsrl32  a2, a2, 0
-    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_array_length.S b/runtime/interpreter/mterp/mips64/op_array_length.S
deleted file mode 100644
index 2d9e172..0000000
--- a/runtime/interpreter/mterp/mips64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /*
-     * Return the length of an array.
-     */
-    srl     a1, rINST, 12               # a1 <- B
-    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a0, common_errNullObject    # yup, fail
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a3, a2                     # vB <- length
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_check_cast.S b/runtime/interpreter/mterp/mips64/op_check_cast.S
deleted file mode 100644
index 472595d..0000000
--- a/runtime/interpreter/mterp/mips64/op_check_cast.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    .extern MterpCheckCast
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpCheckCast              # (index, &obj, method, self)
-    PREFETCH_INST 2
-    bnez    v0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmp_long.S b/runtime/interpreter/mterp/mips64/op_cmp_long.S
deleted file mode 100644
index 6e9376c..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmp_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* cmp-long vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    slt     a2, a0, a1
-    slt     a0, a1, a0
-    subu    a0, a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_double.S b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
deleted file mode 100644
index a8e2ef9..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmpWide.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_float.S b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
deleted file mode 100644
index 0c93eac..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmp.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_double.S b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
deleted file mode 100644
index 9111b06..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmpWide.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_float.S b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
deleted file mode 100644
index b047451..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmp.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_const.S b/runtime/interpreter/mterp/mips64/op_const.S
deleted file mode 100644
index 4b0d69b..0000000
--- a/runtime/interpreter/mterp/mips64/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_16.S b/runtime/interpreter/mterp/mips64/op_const_16.S
deleted file mode 100644
index 51e68a7..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_4.S b/runtime/interpreter/mterp/mips64/op_const_4.S
deleted file mode 100644
index 0a58bff..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_4.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const/4 vA, #+B */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    seh     a0, rINST                   # sign extend B in rINST
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    sra     a0, a0, 12                  # shift B into its final position
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- +B
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S
deleted file mode 100644
index 3f0c716..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_high16.S b/runtime/interpreter/mterp/mips64/op_const_high16.S
deleted file mode 100644
index 43effb6..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const/high16 vAA, #+BBBB0000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    sll     a0, a0, 16                  # a0 <- BBBB0000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB0000
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_handle.S b/runtime/interpreter/mterp/mips64/op_const_method_handle.S
deleted file mode 100644
index 43584d1..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_type.S b/runtime/interpreter/mterp/mips64/op_const_method_type.S
deleted file mode 100644
index 553b284..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S
deleted file mode 100644
index 96cbb5a..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
deleted file mode 100644
index 47f2101..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* const/string vAA, String//BBBBBBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
-    srl     a1, rINST, 8                # a1 <- AA
-    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 3                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide.S b/runtime/interpreter/mterp/mips64/op_const_wide.S
deleted file mode 100644
index f7eaf7c..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    srl     a4, rINST, 8                # a4 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
-    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
-    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
-    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
-    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_16.S b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
deleted file mode 100644
index 3a70937..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const-wide/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_32.S b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
deleted file mode 100644
index 867197c..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
deleted file mode 100644
index d741631..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_div_double.S b/runtime/interpreter/mterp/mips64/op_div_double.S
deleted file mode 100644
index 44998f0..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
deleted file mode 100644
index 396af79..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float.S b/runtime/interpreter/mterp/mips64/op_div_float.S
deleted file mode 100644
index 7b09d52..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
deleted file mode 100644
index e74fdda..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int.S b/runtime/interpreter/mterp/mips64/op_div_int.S
deleted file mode 100644
index fb04acb..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
deleted file mode 100644
index db29b84..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
deleted file mode 100644
index e903dde..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
deleted file mode 100644
index 0559605..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long.S b/runtime/interpreter/mterp/mips64/op_div_long.S
deleted file mode 100644
index 01fc2b2..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
deleted file mode 100644
index 9627ab8..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_float.S b/runtime/interpreter/mterp/mips64/op_double_to_float.S
deleted file mode 100644
index 2b2acee..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    cvt.s.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
deleted file mode 100644
index d099522..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_int.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    trunc.w.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
deleted file mode 100644
index 9b65da5..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_long.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    trunc.l.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_fill_array_data.S b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
deleted file mode 100644
index c90f0b9..0000000
--- a/runtime/interpreter/mterp/mips64/op_fill_array_data.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    .extern MterpFillArrayData
-    EXPORT_PC
-    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
-    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
-    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
-    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
-    jal     MterpFillArrayData          # (obj, payload)
-    beqzc   v0, MterpPossibleException  # exception?
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array.S b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
deleted file mode 100644
index 35f55c2..0000000
--- a/runtime/interpreter/mterp/mips64/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern $helper
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rSELF
-    jal     $helper
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
deleted file mode 100644
index a4e18f6..0000000
--- a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_double.S b/runtime/interpreter/mterp/mips64/op_float_to_double.S
deleted file mode 100644
index 6accfee..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    cvt.d.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
deleted file mode 100644
index 2806973..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_int.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    trunc.w.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
deleted file mode 100644
index c40c8a6..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_long.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    trunc.l.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_goto.S b/runtime/interpreter/mterp/mips64/op_goto.S
deleted file mode 100644
index 68fc83d..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    srl     rINST, rINST, 8
-    seb     rINST, rINST                # rINST <- offset (sign-extended AA)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_goto_16.S b/runtime/interpreter/mterp/mips64/op_goto_16.S
deleted file mode 100644
index ae56066..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended AAAA)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_goto_32.S b/runtime/interpreter/mterp/mips64/op_goto_32.S
deleted file mode 100644
index 498b6d6..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto_32.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    lh      rINST, 2(rPC)               # rINST <- aaaa (low)
-    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
-    ins     rINST, a1, 16, 16           # rINST <- offset (sign-extended AAAAaaaa)
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_if_eq.S b/runtime/interpreter/mterp/mips64/op_if_eq.S
deleted file mode 100644
index aa35cad..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_eqz.S b/runtime/interpreter/mterp/mips64/op_if_eqz.S
deleted file mode 100644
index 0fe3418..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ge.S b/runtime/interpreter/mterp/mips64/op_if_ge.S
deleted file mode 100644
index 59fdcc5..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gez.S b/runtime/interpreter/mterp/mips64/op_if_gez.S
deleted file mode 100644
index 57f1f66..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gt.S b/runtime/interpreter/mterp/mips64/op_if_gt.S
deleted file mode 100644
index 26cc119..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gtz.S b/runtime/interpreter/mterp/mips64/op_if_gtz.S
deleted file mode 100644
index 69fcacb..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_le.S b/runtime/interpreter/mterp/mips64/op_if_le.S
deleted file mode 100644
index a7fce17..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lez.S b/runtime/interpreter/mterp/mips64/op_if_lez.S
deleted file mode 100644
index f3edcc6..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lt.S b/runtime/interpreter/mterp/mips64/op_if_lt.S
deleted file mode 100644
index a975a31..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ltz.S b/runtime/interpreter/mterp/mips64/op_if_ltz.S
deleted file mode 100644
index c1d730d..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ne.S b/runtime/interpreter/mterp/mips64/op_if_ne.S
deleted file mode 100644
index f143ee9..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_nez.S b/runtime/interpreter/mterp/mips64/op_if_nez.S
deleted file mode 100644
index 1856b96..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
deleted file mode 100644
index e91f099..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "mips64/field.S" { }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
deleted file mode 100644
index dc2a42a..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
deleted file mode 100644
index 979dc70..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
deleted file mode 100644
index c5bf650..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
deleted file mode 100644
index cb35556..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
deleted file mode 100644
index 3bf0c5a..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
deleted file mode 100644
index 6034567..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
deleted file mode 100644
index 23fa187..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
deleted file mode 100644
index 171d543..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
+++ /dev/null
@@ -1,16 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    .extern artIGetObjectFromMterp
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- object we're operating on
-    jal     artIGetObjectFromMterp      # (obj, offset)
-    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a3, MterpPossibleException  # bail out
-    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_quick.S b/runtime/interpreter/mterp/mips64/op_iget_quick.S
deleted file mode 100644
index fee6ab7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"lw" }
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    $load   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
deleted file mode 100644
index a9927fc..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
deleted file mode 100644
index 6e152db..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
deleted file mode 100644
index 40f3645..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
deleted file mode 100644
index 2adc6ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a4, 2(rPC)                  # a4 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    daddu   a4, a3, a4                  # create direct pointer
-    lw      a0, 0(a4)
-    lw      a1, 4(a4)
-    dinsu   a0, a1, 32, 32
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG_WIDE a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_instance_of.S b/runtime/interpreter/mterp/mips64/op_instance_of.S
deleted file mode 100644
index 39a5dc7..0000000
--- a/runtime/interpreter/mterp/mips64/op_instance_of.S
+++ /dev/null
@@ -1,23 +0,0 @@
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    .extern MterpInstanceOf
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- CCCC
-    srl     a1, rINST, 12               # a1 <- B
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpInstanceOf             # (index, &obj, method, self)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    ADVANCE 2                           # advance rPC
-    SET_VREG v0, a2                     # vA <- v0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_byte.S b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
deleted file mode 100644
index 1993e07..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"seb     a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_char.S b/runtime/interpreter/mterp/mips64/op_int_to_char.S
deleted file mode 100644
index 8f03acd..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"and     a0, a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_double.S b/runtime/interpreter/mterp/mips64/op_int_to_double.S
deleted file mode 100644
index 6df71be..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    cvt.d.w f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_float.S b/runtime/interpreter/mterp/mips64/op_int_to_float.S
deleted file mode 100644
index 77e9eba..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    cvt.s.w f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_long.S b/runtime/interpreter/mterp/mips64/op_int_to_long.S
deleted file mode 100644
index 7b9ad86..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* int-to-long vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_short.S b/runtime/interpreter/mterp/mips64/op_int_to_short.S
deleted file mode 100644
index 4a3f234..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"seh     a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom.S b/runtime/interpreter/mterp/mips64/op_invoke_custom.S
deleted file mode 100644
index 964253d..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
deleted file mode 100644
index e6585e3..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct.S b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
deleted file mode 100644
index 5047118..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
deleted file mode 100644
index 5c9b95f..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface.S b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
deleted file mode 100644
index ed148ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeInterface" }
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
deleted file mode 100644
index 91c231e..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
deleted file mode 100644
index d9324d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 8e0ecb5..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static.S b/runtime/interpreter/mterp/mips64/op_invoke_static.S
deleted file mode 100644
index 44f5cb7..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_static.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
deleted file mode 100644
index 289e5aa..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super.S b/runtime/interpreter/mterp/mips64/op_invoke_super.S
deleted file mode 100644
index b13fffe..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeSuper" }
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
deleted file mode 100644
index 350b975..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
deleted file mode 100644
index 0d26cda..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtual" }
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
deleted file mode 100644
index f39562c..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
deleted file mode 100644
index 0bb43f8..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index c448851..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S
deleted file mode 100644
index 81ab911..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "mips64/field.S" { }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
deleted file mode 100644
index 8e1d083..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
deleted file mode 100644
index df99948..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S
deleted file mode 100644
index ce3b614..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
deleted file mode 100644
index df99948..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S
deleted file mode 100644
index 1d587fa..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
deleted file mode 100644
index a6286b7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S
deleted file mode 100644
index d3316dd..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
deleted file mode 100644
index 658ef42..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    .extern MterpIputObjectQuick
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpIputObjectQuick
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_quick.S b/runtime/interpreter/mterp/mips64/op_iput_quick.S
deleted file mode 100644
index b95adfc..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "store":"sw" }
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    $store  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S
deleted file mode 100644
index dd68bbe..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
deleted file mode 100644
index a6286b7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S
deleted file mode 100644
index 05194b3..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
deleted file mode 100644
index 95a8ad8..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a3, 2(rPC)                  # a3 <- field byte offset
-    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
-    ext     a0, rINST, 8, 4             # a0 <- A
-    beqz    a2, common_errNullObject    # object was null
-    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a2, a3                  # create a direct pointer
-    sw      a0, 0(a1)
-    dsrl32  a0, a0, 0
-    sw      a0, 4(a1)
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_double.S b/runtime/interpreter/mterp/mips64/op_long_to_double.S
deleted file mode 100644
index 8503e76..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    cvt.d.l f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_float.S b/runtime/interpreter/mterp/mips64/op_long_to_float.S
deleted file mode 100644
index 31f5c0e..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    cvt.s.l f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_int.S b/runtime/interpreter/mterp/mips64/op_long_to_int.S
deleted file mode 100644
index 4ef4b51..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "mips64/op_move.S"
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_enter.S b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
deleted file mode 100644
index 36ae503..0000000
--- a/runtime/interpreter/mterp/mips64/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    .extern artLockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artLockObjectFromCode
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_exit.S b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
deleted file mode 100644
index 9945952..0000000
--- a/runtime/interpreter/mterp/mips64/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    .extern artUnlockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move.S b/runtime/interpreter/mterp/mips64/op_move.S
deleted file mode 100644
index c79f6cd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_16.S b/runtime/interpreter/mterp/mips64/op_move_16.S
deleted file mode 100644
index 9d5c4dc..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAAAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_exception.S b/runtime/interpreter/mterp/mips64/op_move_exception.S
deleted file mode 100644
index d226718..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_exception.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* move-exception vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_from16.S b/runtime/interpreter/mterp/mips64/op_move_from16.S
deleted file mode 100644
index 6d6bde0..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_object.S b/runtime/interpreter/mterp/mips64/op_move_object.S
deleted file mode 100644
index 47e0272..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_16.S b/runtime/interpreter/mterp/mips64/op_move_object_16.S
deleted file mode 100644
index a777dcd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_from16.S b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
deleted file mode 100644
index ab55ebd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result.S b/runtime/interpreter/mterp/mips64/op_move_result.S
deleted file mode 100644
index 1ec28cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    lw      a0, 0(a0)                   # a0 <- result.i
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- result
-    .else
-    SET_VREG a0, a2                     # vAA <- result
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_object.S b/runtime/interpreter/mterp/mips64/op_move_result_object.S
deleted file mode 100644
index e76bc22..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_wide.S b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
deleted file mode 100644
index 3ba0d72..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* for: move-result-wide */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    ld      a0, 0(a0)                   # a0 <- result.j
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide.S b/runtime/interpreter/mterp/mips64/op_move_wide.S
deleted file mode 100644
index ea23f87..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    ext     a2, rINST, 8, 4             # a2 <- A
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_16.S b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
deleted file mode 100644
index 8ec6068..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
deleted file mode 100644
index 11d5603..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double.S b/runtime/interpreter/mterp/mips64/op_mul_double.S
deleted file mode 100644
index e7e17f7..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
deleted file mode 100644
index f404d46..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float.S b/runtime/interpreter/mterp/mips64/op_mul_float.S
deleted file mode 100644
index 9a695fc..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
deleted file mode 100644
index a134a34..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int.S b/runtime/interpreter/mterp/mips64/op_mul_int.S
deleted file mode 100644
index e1b90ff..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
deleted file mode 100644
index c0c4063..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
deleted file mode 100644
index bb4fff8..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
deleted file mode 100644
index da11ea9..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long.S b/runtime/interpreter/mterp/mips64/op_mul_long.S
deleted file mode 100644
index ec32850..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
deleted file mode 100644
index eb50cda..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_double.S b/runtime/interpreter/mterp/mips64/op_neg_double.S
deleted file mode 100644
index a135d61..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_double.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
-    neg.d   f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_float.S b/runtime/interpreter/mterp/mips64/op_neg_float.S
deleted file mode 100644
index 78019f0..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_float.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
-    neg.s   f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_int.S b/runtime/interpreter/mterp/mips64/op_neg_int.S
deleted file mode 100644
index 31538c0..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"subu    a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_long.S b/runtime/interpreter/mterp/mips64/op_neg_long.S
deleted file mode 100644
index bc80d06..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unopWide.S" {"instr":"dsubu   a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_new_array.S b/runtime/interpreter/mterp/mips64/op_new_array.S
deleted file mode 100644
index d78b4ac..0000000
--- a/runtime/interpreter/mterp/mips64/op_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    .extern MterpNewArray
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    move    a3, rSELF
-    jal     MterpNewArray
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_new_instance.S b/runtime/interpreter/mterp/mips64/op_new_instance.S
deleted file mode 100644
index cc5e13e..0000000
--- a/runtime/interpreter/mterp/mips64/op_new_instance.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    .extern MterpNewInstance
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rSELF
-    move    a2, rINST
-    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_nop.S b/runtime/interpreter/mterp/mips64/op_nop.S
deleted file mode 100644
index cc803a7..0000000
--- a/runtime/interpreter/mterp/mips64/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_not_int.S b/runtime/interpreter/mterp/mips64/op_not_int.S
deleted file mode 100644
index 5954095..0000000
--- a/runtime/interpreter/mterp/mips64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"nor     a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_not_long.S b/runtime/interpreter/mterp/mips64/op_not_long.S
deleted file mode 100644
index c8f5da7..0000000
--- a/runtime/interpreter/mterp/mips64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unopWide.S" {"instr":"nor     a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int.S b/runtime/interpreter/mterp/mips64/op_or_int.S
deleted file mode 100644
index 0102355..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
deleted file mode 100644
index eed8900..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
deleted file mode 100644
index 16a0f3e..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
deleted file mode 100644
index dbbf790..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long.S b/runtime/interpreter/mterp/mips64/op_or_long.S
deleted file mode 100644
index e6f8639..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
deleted file mode 100644
index ad5e6c8..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S
deleted file mode 100644
index 44e77a4..0000000
--- a/runtime/interpreter/mterp/mips64/op_packed_switch.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBBBBBB */
-    .extern $func
-    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
-    GET_VREG a1, a3                     # a1 <- vAA
-    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
-    jal     $func                       # v0 <- code-unit branch offset
-    move    rINST, v0
-    b       MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double.S b/runtime/interpreter/mterp/mips64/op_rem_double.S
deleted file mode 100644
index ba61cfd..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_double.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* rem-double vAA, vBB, vCC */
-    .extern fmod
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
-    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
-    jal     fmod                        # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
deleted file mode 100644
index c649f0d..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* rem-double/2addr vA, vB */
-    .extern fmod
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f12, a2             # f12 <- vA
-    GET_VREG_DOUBLE f13, a3             # f13 <- vB
-    jal     fmod                        # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float.S b/runtime/interpreter/mterp/mips64/op_rem_float.S
deleted file mode 100644
index 3967b0b..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_float.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* rem-float vAA, vBB, vCC */
-    .extern fmodf
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f12, a2              # f12 <- vBB
-    GET_VREG_FLOAT f13, a3              # f13 <- vCC
-    jal     fmodf                       # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
deleted file mode 100644
index 3fed41e..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* rem-float/2addr vA, vB */
-    .extern fmodf
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f12, a2              # f12 <- vA
-    GET_VREG_FLOAT f13, a3              # f13 <- vB
-    jal     fmodf                       # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int.S b/runtime/interpreter/mterp/mips64/op_rem_int.S
deleted file mode 100644
index c05e9c4..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
deleted file mode 100644
index a4e162d..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
deleted file mode 100644
index 3284f14..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
deleted file mode 100644
index 1e6a584..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long.S b/runtime/interpreter/mterp/mips64/op_rem_long.S
deleted file mode 100644
index 32b2d19..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
deleted file mode 100644
index ad658e1..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
deleted file mode 100644
index edd795f..0000000
--- a/runtime/interpreter/mterp/mips64/op_return.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"instr":"GET_VREG"}
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return (sign-extend), return-object (zero-extend)
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    $instr  a0, a2                      # a0 <- vAA
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
deleted file mode 100644
index b69b880..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_return.S" {"instr":"GET_VREG_U"}
diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S
deleted file mode 100644
index f6eee91..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
deleted file mode 100644
index 4e9b640..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    .extern MterpSuspendCheck
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S
deleted file mode 100644
index 91ca1fa..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_wide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vAA
-    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int.S b/runtime/interpreter/mterp/mips64/op_rsub_int.S
deleted file mode 100644
index fa31a0a..0000000
--- a/runtime/interpreter/mterp/mips64/op_rsub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
deleted file mode 100644
index c31ff32..0000000
--- a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
deleted file mode 100644
index 200da35..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "mips64/field.S" { }
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
deleted file mode 100644
index 8abb396..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
deleted file mode 100644
index 68623f6..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
deleted file mode 100644
index 3c7b962..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
deleted file mode 100644
index 3b260e6..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
deleted file mode 100644
index 9a8579b..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
deleted file mode 100644
index 14f232c..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int.S b/runtime/interpreter/mterp/mips64/op_shl_int.S
deleted file mode 100644
index 784481f..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
deleted file mode 100644
index a6c8a78..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
deleted file mode 100644
index 36ef207..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long.S b/runtime/interpreter/mterp/mips64/op_shl_long.S
deleted file mode 100644
index 225a2cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
deleted file mode 100644
index c04d882..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int.S b/runtime/interpreter/mterp/mips64/op_shr_int.S
deleted file mode 100644
index eded037..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
deleted file mode 100644
index 5b4d96f..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
deleted file mode 100644
index 175eb86..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long.S b/runtime/interpreter/mterp/mips64/op_shr_long.S
deleted file mode 100644
index 0db38c8..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
deleted file mode 100644
index 48131ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sparse_switch.S b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
deleted file mode 100644
index b065aaa..0000000
--- a/runtime/interpreter/mterp/mips64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
deleted file mode 100644
index 0bd6837..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "mips64/field.S" { }
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
deleted file mode 100644
index 2e769d5..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
deleted file mode 100644
index 0b04b59..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
deleted file mode 100644
index 4a80375..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S
deleted file mode 100644
index 09bd0fb..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
deleted file mode 100644
index c00043b..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
deleted file mode 100644
index 070d17f..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double.S b/runtime/interpreter/mterp/mips64/op_sub_double.S
deleted file mode 100644
index 40a6c89..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
deleted file mode 100644
index 984737e..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float.S b/runtime/interpreter/mterp/mips64/op_sub_float.S
deleted file mode 100644
index 9010592..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
deleted file mode 100644
index e7d4ffe..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int.S b/runtime/interpreter/mterp/mips64/op_sub_int.S
deleted file mode 100644
index 609ea05..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
deleted file mode 100644
index ba2f1e8..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long.S b/runtime/interpreter/mterp/mips64/op_sub_long.S
deleted file mode 100644
index 09a6afd..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
deleted file mode 100644
index b9ec82a..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_throw.S b/runtime/interpreter/mterp/mips64/op_throw.S
deleted file mode 100644
index 6418d57..0000000
--- a/runtime/interpreter/mterp/mips64/op_throw.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
-    beqzc   a0, common_errNullObject
-    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
-    b       MterpException
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3e.S b/runtime/interpreter/mterp/mips64/op_unused_3e.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3f.S b/runtime/interpreter/mterp/mips64/op_unused_3f.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_40.S b/runtime/interpreter/mterp/mips64/op_unused_40.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_41.S b/runtime/interpreter/mterp/mips64/op_unused_41.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_42.S b/runtime/interpreter/mterp/mips64/op_unused_42.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_43.S b/runtime/interpreter/mterp/mips64/op_unused_43.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_79.S b/runtime/interpreter/mterp/mips64/op_unused_79.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_7a.S b/runtime/interpreter/mterp/mips64/op_unused_7a.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f3.S b/runtime/interpreter/mterp/mips64/op_unused_f3.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f4.S b/runtime/interpreter/mterp/mips64/op_unused_f4.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f5.S b/runtime/interpreter/mterp/mips64/op_unused_f5.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f6.S b/runtime/interpreter/mterp/mips64/op_unused_f6.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f7.S b/runtime/interpreter/mterp/mips64/op_unused_f7.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f8.S b/runtime/interpreter/mterp/mips64/op_unused_f8.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f9.S b/runtime/interpreter/mterp/mips64/op_unused_f9.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fc.S b/runtime/interpreter/mterp/mips64/op_unused_fc.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fd.S b/runtime/interpreter/mterp/mips64/op_unused_fd.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int.S b/runtime/interpreter/mterp/mips64/op_ushr_int.S
deleted file mode 100644
index 37c90cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
deleted file mode 100644
index d6bf413..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
deleted file mode 100644
index 2a2d843..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long.S b/runtime/interpreter/mterp/mips64/op_ushr_long.S
deleted file mode 100644
index e724405..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
deleted file mode 100644
index d2cf135..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int.S b/runtime/interpreter/mterp/mips64/op_xor_int.S
deleted file mode 100644
index ee25ebc..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
deleted file mode 100644
index 0f04967..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
deleted file mode 100644
index ecb21ae..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
deleted file mode 100644
index 115ae99..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long.S b/runtime/interpreter/mterp/mips64/op_xor_long.S
deleted file mode 100644
index 7ebabc2..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
deleted file mode 100644
index 0f1919a..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/other.S b/runtime/interpreter/mterp/mips64/other.S
new file mode 100644
index 0000000..789efee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/other.S
@@ -0,0 +1,355 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@BBBB */
+    .extern $helper
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     $helper                     # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    seh     a0, rINST                   # sign extend B in rINST
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    sra     a0, a0, 12                  # shift B into its final position
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- +B
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    sll     a0, a0, 16                  # a0 <- BBBB0000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB0000
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, String//BBBBBBBB */
+    .extern MterpConstString
+    EXPORT_PC
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
+    srl     a1, rINST, 8                # a1 <- AA
+    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 3                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    srl     a4, rINST, 8                # a4 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
+    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
+    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
+    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
+    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_monitor_enter():
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    .extern artLockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artLockObjectFromCode
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_monitor_exit():
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    .extern artUnlockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vA <- vB
+    .else
+    SET_VREG a0, a2                     # vA <- vB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAAAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_exception():
+    /* move-exception vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    lw      a0, 0(a0)                   # a0 <- result.i
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAA <- result
+    .else
+    SET_VREG a0, a2                     # vAA <- result
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* for: move-result-wide */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    ld      a0, 0(a0)                   # a0 <- result.j
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_nop():
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/mips64/unop.S b/runtime/interpreter/mterp/mips64/unop.S
deleted file mode 100644
index e3f7ea0..0000000
--- a/runtime/interpreter/mterp/mips64/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unopWide.S b/runtime/interpreter/mterp/mips64/unopWide.S
deleted file mode 100644
index c0dd1aa..0000000
--- a/runtime/interpreter/mterp/mips64/unopWide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"preinstr":""}
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * For: not-long, neg-long
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unused.S b/runtime/interpreter/mterp/mips64/unused.S
deleted file mode 100644
index 30d38bd6..0000000
--- a/runtime/interpreter/mterp/mips64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/zcmp.S b/runtime/interpreter/mterp/mips64/zcmp.S
deleted file mode 100644
index 75db49e..0000000
--- a/runtime/interpreter/mterp/mips64/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    b${condition}zc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 65c1aa8..912c444 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -25,6 +25,7 @@
 #include "interpreter/interpreter_common.h"
 #include "interpreter/interpreter_intrinsics.h"
 #include "interpreter/shadow_frame-inl.h"
+#include "mirror/string-alloc-inl.h"
 
 namespace art {
 namespace interpreter {
@@ -34,11 +35,11 @@
 void CheckMterpAsmConstants() {
   /*
    * If we're using computed goto instruction transitions, make sure
-   * none of the handlers overflows the 128-byte limit.  This won't tell
+   * none of the handlers overflows the byte limit.  This won't tell
    * which one did, but if any one is too big the total size will
    * overflow.
    */
-  const int width = 128;
+  const int width = kMterpHandlerSize;
   int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                     (uintptr_t) artMterpAsmInstructionStart;
   if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
@@ -48,11 +49,7 @@
 }
 
 void InitMterpTls(Thread* self) {
-  self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
-  self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
-  self->SetMterpCurrentIBase((kTraceExecutionEnabled || kTestExportPC) ?
-                             artMterpAsmAltInstructionStart :
-                             artMterpAsmInstructionStart);
+  self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
 }
 
 /*
@@ -146,22 +143,22 @@
   return entries[index];
 }
 
-extern "C" size_t MterpShouldSwitchInterpreters()
+bool CanUseMterp()
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Runtime* const runtime = Runtime::Current();
-  const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
-  return instrumentation->NonJitProfilingActive() ||
-      Dbg::IsDebuggerActive() ||
+  return
+      runtime->IsStarted() &&
+      !runtime->IsAotCompiler() &&
+      !Dbg::IsDebuggerActive() &&
+      !runtime->GetInstrumentation()->IsActive() &&
+      // mterp only knows how to deal with the normal exits. It cannot handle any of the
+      // non-standard force-returns.
+      !runtime->AreNonStandardExitsEnabled() &&
       // An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
       // know how to deal with these so we could end up never dealing with it if we are in an
-      // infinite loop. Since this can be called in a tight loop and getting the current thread
-      // requires a TLS read we instead first check a short-circuit runtime flag that will only be
-      // set if something tries to set an async exception. This will make this function faster in
-      // the common case where no async exception has ever been sent. We don't need to worry about
-      // synchronization on the runtime flag since it is only set in a checkpoint which will either
-      // take place on the current thread or act as a synchronization point.
-      (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
-       Thread::Current()->IsAsyncExceptionPending());
+      // infinite loop.
+      !runtime->AreAsyncExceptionsThrown() &&
+      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
 }
 
 
@@ -172,7 +169,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoFastInvoke<kVirtual>(
+  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -183,7 +180,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kSuper, false, false>(
+  return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -194,7 +191,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kInterface, false, false>(
+  return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -205,7 +202,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoFastInvoke<kDirect>(
+  return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -216,7 +213,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoFastInvoke<kStatic>(
+  return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -227,7 +224,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeCustom<false /* is_range */>(
+  return DoInvokeCustom</* is_range= */ false>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -238,7 +235,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokePolymorphic<false /* is_range */>(
+  return DoInvokePolymorphic</* is_range= */ false>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -249,7 +246,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kVirtual, true, false>(
+  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -260,7 +257,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kSuper, true, false>(
+  return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -271,7 +268,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kInterface, true, false>(
+  return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -282,7 +279,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kDirect, true, false>(
+  return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -293,7 +290,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvoke<kStatic, true, false>(
+  return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -304,7 +301,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
+  return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
 }
 
 extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
@@ -314,7 +311,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokePolymorphic<true /* is_range */>(
+  return DoInvokePolymorphic</* is_range= */ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -325,25 +322,8 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  const uint32_t vregC = inst->VRegC_35c();
-  const uint32_t vtable_idx = inst->VRegB_35c();
-  ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
-  if (receiver != nullptr) {
-    ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
-        vtable_idx, kRuntimePointerSize);
-    if ((called_method != nullptr) && called_method->IsIntrinsic()) {
-      if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
-        jit::Jit* jit = Runtime::Current()->GetJit();
-        if (jit != nullptr) {
-          jit->InvokeVirtualOrInterface(
-              receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
-        }
-        return !self->IsExceptionPending();
-      }
-    }
-  }
-  return DoInvokeVirtualQuick<false>(
-      self, *shadow_frame, inst, inst_data, result_register);
+  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
+      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
 }
 
 extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
@@ -353,8 +333,8 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeVirtualQuick<true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
+      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
 }
 
 extern "C" void MterpThreadFenceForConstructor() {
@@ -382,8 +362,8 @@
   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                    shadow_frame->GetMethod(),
                                                    self,
-                                                   /* can_run_clinit */ false,
-                                                   /* verify_access */ false);
+                                                   /* can_run_clinit= */ false,
+                                                   /* verify_access= */ false);
   if (UNLIKELY(c == nullptr)) {
     return true;
   }
@@ -470,8 +450,8 @@
   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                    shadow_frame->GetMethod(),
                                                    self,
-                                                   /* can_run_clinit */ false,
-                                                   /* verify_access */ false);
+                                                   /* can_run_clinit= */ false,
+                                                   /* verify_access= */ false);
   if (LIKELY(c != nullptr)) {
     if (UNLIKELY(c->IsStringClass())) {
       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -559,8 +539,20 @@
   return MoveToExceptionHandler(self, *shadow_frame, instrumentation);
 }
 
+struct MterpCheckHelper {
+  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
+};
+DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode);
+
 extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Check that we are using the right interpreter.
+  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
+    // The flag might be currently being updated on all threads. Retry with lock.
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    DCHECK_EQ(self->UseMterp(), CanUseMterp());
+  }
+  DCHECK(!Runtime::Current()->IsActiveTransaction());
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   uint16_t inst_data = inst->Fetch16(0);
   if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -576,6 +568,9 @@
     // Save invalid dex pc to force segfault if improperly used.
     shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
   }
+  if (MterpCheckHelper::kSlowMode) {
+    shadow_frame->CheckConsistentVRegs();
+  }
 }
 
 extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
@@ -660,7 +655,7 @@
 extern "C" size_t MterpSuspendCheck(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   self->AllowThreadSuspension();
-  return MterpShouldSwitchInterpreters();
+  return !self->UseMterp();
 }
 
 // Execute single field access instruction (get/put, static/instance).
@@ -683,8 +678,8 @@
   if (kIsPrimitive) {
     if (kIsRead) {
       PrimType value = UNLIKELY(is_volatile)
-          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset)
-          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset);
+          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
+          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
       if (sizeof(PrimType) == sizeof(uint64_t)) {
         shadow_frame->SetVRegLong(vRegA, value);  // Set two consecutive registers.
       } else {
@@ -695,9 +690,9 @@
           ? shadow_frame->GetVRegLong(vRegA)
           : shadow_frame->GetVReg(vRegA);
       if (UNLIKELY(is_volatile)) {
-        obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value);
+        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
       } else {
-        obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value);
+        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
       }
     }
   } else {  // Object.
@@ -709,9 +704,9 @@
     } else {  // Write.
       ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
       if (UNLIKELY(is_volatile)) {
-        obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value);
+        obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
       } else {
-        obj->SetFieldObject</*kTransactionActive*/ false>(offset, value);
+        obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
       }
     }
   }
@@ -730,7 +725,7 @@
   shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
   ArtMethod* referrer = shadow_frame->GetMethod();
   uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
       field_idx, referrer, self, sizeof(PrimType));
   if (UNLIKELY(field == nullptr)) {
     DCHECK(self->IsExceptionPending());
@@ -748,6 +743,10 @@
   return true;
 }
 
+// This method is called from assembly to handle field access instructions.
+//
+// This method is fairly hot.  It is long, but it has been carefully optimized.
+// It contains only fully inlined methods -> no spills -> no prologue/epilogue.
 template<typename PrimType, FindFieldType kAccessType>
 ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
                                         uint16_t inst_data,
@@ -756,8 +755,32 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
 
+  // Try to find the field in small thread-local cache first.
+  InterpreterCache* tls_cache = self->GetInterpreterCache();
+  size_t tls_value;
+  if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+    // The meaning of the cache value is opcode-specific.
+    // It is ArtField* for static fields and the raw offset for instance fields.
+    size_t offset = kIsStatic
+        ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
+        : tls_value;
+    if (kIsDebugBuild) {
+      uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
+      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
+          field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
+      DCHECK_EQ(offset, field->GetOffset().SizeValue());
+    }
+    ObjPtr<mirror::Object> obj = kIsStatic
+        ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
+        : MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
+    if (LIKELY(obj != nullptr)) {
+      MterpFieldAccess<PrimType, kAccessType>(
+          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
+      return true;
+    }
+  }
+
   // This effectively inlines the fast path from ArtMethod::GetDexCache.
-  // It avoids non-inlined call which in turn allows elimination of the prologue and epilogue.
   ArtMethod* referrer = shadow_frame->GetMethod();
   if (LIKELY(!referrer->IsObsolete())) {
     // Avoid read barriers, since we need only the pointer to the native (non-movable)
@@ -771,12 +794,20 @@
     if (LIKELY(field != nullptr)) {
       bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
       if (LIKELY(initialized)) {
-        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>(
+        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
             field_idx, referrer, self, sizeof(PrimType))));
         ObjPtr<mirror::Object> obj = kIsStatic
             ? field->GetDeclaringClass().Ptr()
             : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
         if (LIKELY(kIsStatic || obj != nullptr)) {
+          // Only non-volatile fields are allowed in the thread-local cache.
+          if (LIKELY(!field->IsVolatile())) {
+            if (kIsStatic) {
+              tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
+            } else {
+              tls_cache->Set(inst, field->GetOffset().SizeValue());
+            }
+          }
           MterpFieldAccess<PrimType, kAccessType>(
               inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
           return true;
@@ -895,7 +926,7 @@
   jit::Jit* jit = Runtime::Current()->GetJit();
   if (jit != nullptr) {
     int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
-    jit->AddSamples(self, method, count, /*with_backedges*/ true);
+    jit->AddSamples(self, method, count, /*with_backedges=*/ true);
   }
   return MterpSetUpHotnessCountdown(method, shadow_frame, self);
 }
@@ -920,7 +951,7 @@
     osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
     if (offset <= 0) {
       // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
-      jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
+      jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
     }
     did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
   }
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 1a56d26..af52758 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -25,8 +25,6 @@
  */
 extern "C" void* artMterpAsmInstructionStart[];
 extern "C" void* artMterpAsmInstructionEnd[];
-extern "C" void* artMterpAsmAltInstructionStart[];
-extern "C" void* artMterpAsmAltInstructionEnd[];
 
 namespace art {
 
@@ -36,12 +34,7 @@
 
 void InitMterpTls(Thread* self);
 void CheckMterpAsmConstants();
-
-// The return type should be 'bool' but our assembly stubs expect 'bool'
-// to be zero-extended to the whole register and that's broken on x86-64
-// as a 'bool' is returned in 'al' and the rest of 'rax' is garbage.
-// TODO: Fix mterp and stubs and revert this workaround. http://b/30232671
-extern "C" size_t MterpShouldSwitchInterpreters();
+bool CanUseMterp();
 
 // Poison value for TestExportPC.  If we segfault with this value, it means that a mterp
 // handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
@@ -50,6 +43,8 @@
 // Set true to enable poison testing of ExportPC.  Uses Alt interpreter.
 constexpr bool kTestExportPC = false;
 
+constexpr size_t kMterpHandlerSize = 128;
+
 }  // namespace interpreter
 }  // namespace art
 
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
deleted file mode 100644
index 25512ae..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ /dev/null
@@ -1,12409 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'arm'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: arm/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending".  Only the arguments that don't fit in the first 4
-registers are placed on the stack.  "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  r4  rPC       interpreted program counter, used for fetching instructions
-  r5  rFP       interpreted frame pointer, used for accessing locals and args
-  r6  rSELF     self (Thread) pointer
-  r7  rINST     first 16-bit code unit of current instruction
-  r8  rIBASE    interpreted instruction base pointer, used for computed goto
-  r10 rPROFILE  branch profiling countdown
-  r11 rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      r4
-#define CFI_DEX  4  // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
-#define rFP      r5
-#define rSELF    r6
-#define rINST    r7
-#define rIBASE   r8
-#define rPROFILE r10
-#define rREFS    r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
-    ldr  \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
-    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-    sub  \tmp, rPC, \tmp
-    asr  \tmp, #1
-    str  \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    ldrh    rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ldrh    rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
-    ldrh    \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    ldrh    rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-  add  rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg.  Updates
- * rPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    ldrh    rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
-    ldrh    \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
-    ldrsh   \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
-    ldrb     \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
-    and     \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg.  Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
-    add     pc, rIBASE, \reg, lsl #7
-.endm
-.macro GOTO_OPCODE_BASE base,reg
-    add     pc, \base, \reg, lsl #7
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
-    ldr     \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
-    str     \reg, [rFP, \vreg, lsl #2]
-    mov     \reg, #0
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
-    str     \reg, [rFP, \vreg, lsl #2]
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
-    str     \reg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
-    mov     \tmp1, #0
-    add     \tmp2, \vreg, #1
-    SET_VREG_SHADOW \tmp1, \vreg
-    SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
-    add     \reg, rFP, \vreg, lsl #2   /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-  ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
-    .arm
-    .type \name, #function
-    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-    .fnstart
-.endm
-
-.macro END name
-    .fnend
-    .cfi_endproc
-    .size \name, .-\name
-.endm
-
-/* File: arm/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align  2
-
-/*
- * On entry:
- *  r0  Thread* self/
- *  r1  insns_
- *  r2  ShadowFrame
- *  r3  JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
-    stmfd   sp!, {r3-r10,fp,lr}         @ save 10 regs, (r3 just to align 64)
-    .cfi_adjust_cfa_offset 40
-    .cfi_rel_offset r3, 0
-    .cfi_rel_offset r4, 4
-    .cfi_rel_offset r5, 8
-    .cfi_rel_offset r6, 12
-    .cfi_rel_offset r7, 16
-    .cfi_rel_offset r8, 20
-    .cfi_rel_offset r9, 24
-    .cfi_rel_offset r10, 28
-    .cfi_rel_offset fp, 32
-    .cfi_rel_offset lr, 36
-
-    /* Remember the return register */
-    str     r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
-    /* Remember the dex instruction pointer */
-    str     r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
-    /* set up "named" registers */
-    mov     rSELF, r0
-    ldr     r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
-    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET     @ point to vregs.
-    VREG_INDEX_TO_ADDR rREFS, r0                   @ point to reference array in shadow frame
-    ldr     r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET]   @ Get starting dex_pc.
-    add     rPC, r1, r0, lsl #1                    @ Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
-    /* Set up for backwards branches & osr profiling */
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rSELF
-    bl      MterpSetUpHotnessCountdown
-    mov     rPROFILE, r0                @ Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST                          @ load rINST from rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* NOTE: no fallthrough */
-
-/* File: arm/instruction_start.S */
-
-    .type artMterpAsmInstructionStart, #object
-    .hidden artMterpAsmInstructionStart
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: arm/op_nop.S */
-    FETCH_ADVANCE_INST 1                @ advance to next instr, load rINST
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    GOTO_OPCODE ip                      @ execute it
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: arm/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[B]
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    .if 0
-    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[A]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ execute next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: arm/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH r1, 1                         @ r1<- BBBB
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT r2, r0              @ fp[AA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: arm/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH r1, 2                         @ r1<- BBBB
-    FETCH r0, 1                         @ r0<- AAAA
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT r2, r0              @ fp[AAAA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AAAA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: arm/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: arm/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH r3, 1                         @ r3<- BBBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: arm/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH r3, 2                         @ r3<- BBBB
-    FETCH r2, 1                         @ r2<- AAAA
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
-    VREG_INDEX_TO_ADDR lr, r2           @ r2<- &fp[AAAA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
-    stmia   lr, {r0-r1}                 @ fp[AAAA]<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: arm/op_move_object.S */
-/* File: arm/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[B]
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    .if 1
-    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[A]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ execute next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: arm/op_move_object_from16.S */
-/* File: arm/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH r1, 1                         @ r1<- BBBB
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT r2, r0              @ fp[AA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: arm/op_move_object_16.S */
-/* File: arm/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH r1, 2                         @ r1<- BBBB
-    FETCH r0, 1                         @ r0<- AAAA
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT r2, r0              @ fp[AAAA]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[AAAA]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: arm/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JType.
-    ldr     r0, [r0]                    @ r0 <- result.i.
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT r0, r2, r1          @ fp[AA]<- r0
-    .else
-    SET_VREG r0, r2                     @ fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: arm/op_move_result_wide.S */
-    /* move-result-wide vAA */
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    ldr     r3, [rFP, #OFF_FP_RESULT_REGISTER]
-    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: arm/op_move_result_object.S */
-/* File: arm/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JType.
-    ldr     r0, [r0]                    @ r0 <- result.i.
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT r0, r2, r1          @ fp[AA]<- r0
-    .else
-    SET_VREG r0, r2                     @ fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: arm/op_move_exception.S */
-    /* move-exception vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    mov     r1, #0                      @ r1<- 0
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    SET_VREG_OBJECT r3, r2              @ fp[AA]<- exception obj
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    str     r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ clear exception
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: arm/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov    r0, #0
-    mov    r1, #0
-    b      MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: arm/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r0, r2                     @ r0<- vAA
-    mov     r1, #0
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: arm/op_return_wide.S */
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
-    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: arm/op_return_object.S */
-/* File: arm/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov     r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r0, r2                     @ r0<- vAA
-    mov     r1, #0
-    b       MterpReturn
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: arm/op_const_4.S */
-    /* const/4 vA, #+B */
-    sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    SET_VREG r1, r0                     @ fp[A]<- r1
-    GOTO_OPCODE ip                      @ execute next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: arm/op_const_16.S */
-    /* const/16 vAA, #+BBBB */
-    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: arm/op_const.S */
-    /* const vAA, #+BBBBbbbb */
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r1, 2                         @ r1<- BBBB (high)
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: arm/op_const_high16.S */
-    /* const/high16 vAA, #+BBBB0000 */
-    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r0, r0, lsl #16             @ r0<- BBBB0000
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r3                     @ vAA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: arm/op_const_wide_16.S */
-    /* const-wide/16 vAA, #+BBBB */
-    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r1, r0, asr #31             @ r1<- ssssssss
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: arm/op_const_wide_32.S */
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    FETCH r0, 1                         @ r0<- 0000bbbb (low)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
-    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    mov     r1, r0, asr #31             @ r1<- ssssssss
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: arm/op_const_wide.S */
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r1, 2                         @ r1<- BBBB (low middle)
-    FETCH r2, 3                         @ r2<- hhhh (high middle)
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
-    FETCH r3, 4                         @ r3<- HHHH (high)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
-    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: arm/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    FETCH r1, 1                         @ r1<- 0000BBBB (zero-extended)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    mov     r0, #0                      @ r0<- 00000000
-    mov     r1, r1, lsl #16             @ r1<- BBBB0000
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: arm/op_const_string.S */
-/* File: arm/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    FETCH   r0, 1                       @ r0<- BBBB
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstString                     @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     @ load rINST
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: arm/op_const_string_jumbo.S */
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    FETCH r0, 1                         @ r0<- bbbb (low)
-    FETCH r2, 2                         @ r2<- BBBB (high)
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    orr     r0, r0, r2, lsl #16         @ r1<- BBBBbbbb
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstString            @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     @ advance rPC
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 3                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: arm/op_const_class.S */
-/* File: arm/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC
-    FETCH   r0, 1                       @ r0<- BBBB
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstClass                     @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     @ load rINST
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: arm/op_monitor_enter.S */
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r0, r2                      @ r0<- vAA (object)
-    mov      r1, rSELF                   @ r1<- self
-    bl       artLockObjectFromCode
-    cmp      r0, #0
-    bne      MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE ip                   @ extract opcode from rINST
-    GOTO_OPCODE ip                       @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: arm/op_monitor_exit.S */
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8          @ r2<- AA
-    GET_VREG r0, r2                     @ r0<- vAA (object)
-    mov      r1, rSELF                  @ r0<- self
-    bl       artUnlockObjectFromCode    @ r0<- success for unlock(self, obj)
-    cmp     r0, #0                      @ failed?
-    bne     MterpException
-    FETCH_ADVANCE_INST 1                @ before throw: advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: arm/op_check_cast.S */
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    FETCH    r0, 1                      @ r0<- BBBB
-    mov      r1, rINST, lsr #8          @ r1<- AA
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
-    ldr      r2, [rFP, #OFF_FP_METHOD]  @ r2<- method
-    mov      r3, rSELF                  @ r3<- self
-    bl       MterpCheckCast             @ (index, &obj, method, self)
-    PREFETCH_INST 2
-    cmp      r0, #0
-    bne      MterpPossibleException
-    ADVANCE  2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: arm/op_instance_of.S */
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    FETCH     r0, 1                     @ r0<- CCCC
-    mov       r1, rINST, lsr #12        @ r1<- B
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &object
-    ldr       r2, [rFP, #OFF_FP_METHOD] @ r2<- method
-    mov       r3, rSELF                 @ r3<- self
-    bl        MterpInstanceOf           @ (index, &obj, method, self)
-    ldr       r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx      r9, rINST, #8, #4         @ r9<- A
-    PREFETCH_INST 2
-    cmp       r1, #0                    @ exception pending?
-    bne       MterpException
-    ADVANCE 2                           @ advance rPC
-    SET_VREG r0, r9                     @ vA<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: arm/op_array_length.S */
-    /*
-     * Return the length of an array.
-     */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    GET_VREG r0, r1                     @ r0<- vB (object ref)
-    cmp     r0, #0                      @ is object null?
-    beq     common_errNullObject        @ yup, fail
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- array length
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r3, r2                     @ vB<- length
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: arm/op_new_instance.S */
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rSELF
-    mov     r2, rINST
-    bl      MterpNewInstance           @ (shadow_frame, self, inst_data)
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2               @ advance rPC, load rINST
-    GET_INST_OPCODE ip                 @ extract opcode from rINST
-    GOTO_OPCODE ip                     @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: arm/op_new_array.S */
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    mov     r3, rSELF
-    bl      MterpNewArray
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: arm/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rSELF
-    bl      MterpFilledNewArray
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: arm/op_filled_new_array_range.S */
-/* File: arm/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rSELF
-    bl      MterpFilledNewArrayRange
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: arm/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    FETCH r0, 1                         @ r0<- bbbb (lo)
-    FETCH r1, 2                         @ r1<- BBBB (hi)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
-    GET_VREG r0, r3                     @ r0<- vAA (array object)
-    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
-    bl      MterpFillArrayData          @ (obj, payload)
-    cmp     r0, #0                      @ 0 means an exception is thrown
-    beq     MterpPossibleException      @ exception?
-    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: arm/op_throw.S */
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    mov      r2, rINST, lsr #8           @ r2<- AA
-    GET_VREG r1, r2                      @ r1<- vAA (exception object)
-    cmp      r1, #0                      @ null object?
-    beq      common_errNullObject        @ yes, throw an NPE instead
-    str      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ thread->exception<- obj
-    b        MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: arm/op_goto.S */
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: arm/op_goto_16.S */
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S rINST, 1                    @ rINST<- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: arm/op_goto_32.S */
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".  Because
-     * we need the V bit set, we'll use an adds to convert from Dalvik
-     * offset to byte offset.
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH r0, 1                         @ r0<- aaaa (lo)
-    FETCH r3, 2                         @ r1<- AAAA (hi)
-    orrs    rINST, r0, r3, lsl #16      @ rINST<- AAAAaaaa
-    b       MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: arm/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH r0, 1                         @ r0<- bbbb (lo)
-    FETCH r1, 2                         @ r1<- BBBB (hi)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
-    GET_VREG r1, r3                     @ r1<- vAA
-    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
-    bl      MterpDoPackedSwitch                       @ r0<- code-unit branch offset
-    movs    rINST, r0
-    b       MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: arm/op_sparse_switch.S */
-/* File: arm/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH r0, 1                         @ r0<- bbbb (lo)
-    FETCH r1, 2                         @ r1<- BBBB (hi)
-    mov     r3, rINST, lsr #8           @ r3<- AA
-    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
-    GET_VREG r1, r3                     @ r1<- vAA
-    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
-    bl      MterpDoSparseSwitch                       @ r0<- code-unit branch offset
-    movs    rINST, r0
-    b       MterpCommonTakenBranch
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: arm/op_cmpl_float.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else {
-     *         return -1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    flds    s0, [r2]                    @ s0<- vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    vcmpe.f32  s0, s1                   @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mvn     r0, #0                      @ r0<- -1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    movgt   r0, #1                      @ (greater than) r1<- 1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: arm/op_cmpg_float.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else {
-     *         return 1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    flds    s0, [r2]                    @ s0<- vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    vcmpe.f32 s0, s1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, #1                      @ r0<- 1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    mvnmi   r0, #0                      @ (less than) r1<- -1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: arm/op_cmpl_double.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else {
-     *         return -1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mvn     r0, #0                      @ r0<- -1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    movgt   r0, #1                      @ (greater than) r1<- 1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: arm/op_cmpg_double.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * int compare(x, y) {
-     *     if (x == y) {
-     *         return 0;
-     *     } else if (x < y) {
-     *         return -1;
-     *     } else if (x > y) {
-     *         return 1;
-     *     } else {
-     *         return 1;
-     *     }
-     * }
-     */
-    /* op vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, #1                      @ r0<- 1 (default)
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fmstat                              @ export status flags
-    mvnmi   r0, #0                      @ (less than) r1<- -1
-    moveq   r0, #0                      @ (equal) r1<- 0
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: arm/op_cmp_long.S */
-    /*
-     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
-     * register based on the results of the comparison.
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    cmp     r0, r2
-    sbcs    ip, r1, r3                  @ Sets correct CCs for checking LT (but not EQ/NE)
-    mov     ip, #0
-    mvnlt   ip, #0                      @ -1
-    cmpeq   r0, r2                      @ For correct EQ/NE, we may need to repeat the first CMP
-    orrne   ip, #1
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG ip, r9                     @ vAA<- ip
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: arm/op_if_eq.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    beq MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: arm/op_if_ne.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    bne MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: arm/op_if_lt.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    blt MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: arm/op_if_ge.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    bge MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: arm/op_if_gt.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    bgt MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: arm/op_if_le.S */
-/* File: arm/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r0, r0                     @ r0<- vA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, r3                      @ compare (vA, vB)
-    ble MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: arm/op_if_eqz.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    beq MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: arm/op_if_nez.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    bne MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: arm/op_if_ltz.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    blt MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: arm/op_if_gez.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    bge MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: arm/op_if_gtz.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    bgt MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: arm/op_if_lez.S */
-/* File: arm/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r0, r0                     @ r0<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r0, #0                      @ compare (vA, 0)
-    ble MterpCommonTakenBranchNoFlags
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    beq     .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: arm/op_unused_3e.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: arm/op_unused_3f.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: arm/op_unused_40.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: arm/op_unused_41.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: arm/op_unused_42.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: arm/op_unused_43.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: arm/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #2     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldr   r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: arm/op_aget_wide.S */
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    ldrd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: arm/op_aget_object.S */
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    EXPORT_PC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    bl       artAGetObjectFromMterp     @ (array, index)
-    ldr      r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    PREFETCH_INST 2
-    cmp      r1, #0
-    bne      MterpException
-    SET_VREG_OBJECT r0, r9
-    ADVANCE 2
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: arm/op_aget_boolean.S */
-/* File: arm/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldrb   r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: arm/op_aget_byte.S */
-/* File: arm/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldrsb   r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: arm/op_aget_char.S */
-/* File: arm/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldrh   r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: arm/op_aget_short.S */
-/* File: arm/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldrsh   r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET]     @ r2<- vBB[vCC]
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r2, r9                     @ vAA<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: arm/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #2     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    str  r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: arm/op_aput_wide.S */
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: arm/op_aput_object.S */
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    bl      MterpAputObject
-    cmp     r0, #0
-    beq     MterpPossibleException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: arm/op_aput_boolean.S */
-/* File: arm/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strb  r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: arm/op_aput_byte.S */
-/* File: arm/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strb  r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: arm/op_aput_char.S */
-/* File: arm/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strh  r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: arm/op_aput_short.S */
-/* File: arm/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B r2, 1, 0                    @ r2<- BB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    FETCH_B r3, 1, 1                    @ r3<- CC
-    GET_VREG r0, r2                     @ r0<- vBB (array object)
-    GET_VREG r1, r3                     @ r1<- vCC (requested index)
-    cmp     r0, #0                      @ null array object?
-    beq     common_errNullObject        @ yes, bail
-    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]     @ r3<- arrayObj->length
-    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
-    cmp     r1, r3                      @ compare unsigned index, length
-    bcs     common_errArrayIndex        @ index >= length, bail
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_VREG r2, r9                     @ r2<- vAA
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    strh  r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET]     @ vBB[vCC]<- r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU32
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetU32
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: arm/op_iget_wide.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU64
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetU64
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: arm/op_iget_object.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetObj
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetObj
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: arm/op_iget_boolean.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetU8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: arm/op_iget_byte.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetI8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: arm/op_iget_char.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetU16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: arm/op_iget_short.S */
-/* File: arm/op_iget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIGetI16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU32
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutU32
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: arm/op_iput_wide.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU64
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutU64
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: arm/op_iput_object.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutObj
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutObj
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: arm/op_iput_boolean.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutU8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: arm/op_iput_byte.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutI8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: arm/op_iput_char.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutU16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: arm/op_iput_short.S */
-/* File: arm/op_iput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpIPutI16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU32
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetU32
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: arm/op_sget_wide.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU64
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetU64
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: arm/op_sget_object.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetObj
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetObj
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: arm/op_sget_boolean.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetU8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: arm/op_sget_byte.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetI8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: arm/op_sget_char.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetU16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: arm/op_sget_short.S */
-/* File: arm/op_sget.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSGetI16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU32
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutU32
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: arm/op_sput_wide.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU64
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutU64
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: arm/op_sput_object.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutObj
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutObj
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: arm/op_sput_boolean.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutU8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: arm/op_sput_byte.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI8
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutI8
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: arm/op_sput_char.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutU16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: arm/op_sput_short.S */
-/* File: arm/op_sput.S */
-/* File: arm/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI16
-    mov      r0, rPC                       @ arg0: Instruction* inst
-    mov      r1, rINST                     @ arg1: uint16_t inst_data
-    add      r2, rFP, #OFF_FP_SHADOWFRAME  @ arg2: ShadowFrame* sf
-    mov      r3, rSELF                     @ arg3: Thread* self
-    PREFETCH_INST 2                        @ prefetch next opcode
-    bl       MterpSPutI16
-    cmp      r0, #0
-    beq      MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     @ extract opcode from rINST
-    GOTO_OPCODE ip                         @ jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: arm/op_invoke_virtual.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeVirtual
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: arm/op_invoke_super.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeSuper
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: arm/op_invoke_direct.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeDirect
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: arm/op_invoke_static.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeStatic
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: arm/op_invoke_interface.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeInterface
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: arm/op_return_void_no_barrier.S */
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    mov     r0, rSELF
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    blne    MterpSuspendCheck                       @ (self)
-    mov    r0, #0
-    mov    r1, #0
-    b      MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: arm/op_invoke_virtual_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeVirtualRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: arm/op_invoke_super_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeSuperRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: arm/op_invoke_direct_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeDirectRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: arm/op_invoke_static_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeStaticRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: arm/op_invoke_interface_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeInterfaceRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: arm/op_unused_79.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: arm/op_unused_7a.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: arm/op_neg_int.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    rsb     r0, r0, #0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: arm/op_not_int.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    mvn     r0, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: arm/op_neg_long.S */
-/* File: arm/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0/r1".
-     * This could be an ARM instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    rsbs    r0, r0, #0                           @ optional op; may set condition codes
-    rsc     r1, r1, #0                              @ r0/r1<- op, r2-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: arm/op_not_long.S */
-/* File: arm/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0/r1".
-     * This could be an ARM instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    mvn     r0, r0                           @ optional op; may set condition codes
-    mvn     r1, r1                              @ r0/r1<- op, r2-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: arm/op_neg_float.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    add     r0, r0, #0x80000000                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: arm/op_neg_double.S */
-/* File: arm/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0/r1".
-     * This could be an ARM instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    add     r1, r1, #0x80000000                              @ r0/r1<- op, r2-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: arm/op_int_to_long.S */
-/* File: arm/unopWider.S */
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op r0", where
-     * "result" is a 64-bit quantity in r0/r1.
-     *
-     * For: int-to-long, int-to-double, float-to-long, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    GET_VREG r0, r3                     @ r0<- vB
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-                               @ optional op; may set condition codes
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    mov     r1, r0, asr #31                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 9-10 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: arm/op_int_to_float.S */
-/* File: arm/funop.S */
-    /*
-     * Generic 32-bit unary floating-point operation.  Provide an "instr"
-     * line that specifies an instruction that performs "s1 = op s0".
-     *
-     * for: int-to-float, float-to-int
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fsitos  s1, s0                              @ s1<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s1, [r9]                    @ vA<- s1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: arm/op_int_to_double.S */
-/* File: arm/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op s0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fsitod  d0, s0                              @ d0<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fstd    d0, [r9]                    @ vA<- d0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: arm/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: arm/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    GET_VREG r2, r1                     @ r2<- fp[B]
-    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
-    .if 0
-    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
-    .else
-    SET_VREG r2, r0                     @ fp[A]<- r2
-    .endif
-    GOTO_OPCODE ip                      @ execute next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: arm/op_long_to_float.S */
-/* File: arm/unopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op r0/r1", where
-     * "result" is a 32-bit quantity in r0.
-     *
-     * For: long-to-float, double-to-int, double-to-float
-     *
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for op_move.)
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      __aeabi_l2f                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 9-10 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: arm/op_long_to_double.S */
-    /*
-     * Specialised 64-bit floating point operation.
-     *
-     * Note: The result will be returned in d2.
-     *
-     * For: long-to-double
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    vldr    d0, [r3]                    @ d0<- vAA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    vcvt.f64.s32    d1, s1              @ d1<- (double)(vAAh)
-    vcvt.f64.u32    d2, s0              @ d2<- (double)(vAAl)
-    vldr            d3, constvalop_long_to_double
-    vmla.f64        d2, d1, d3          @ d2<- vAAh*2^32 + vAAl
-
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    vstr.64 d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-    /* literal pool helper */
-constvalop_long_to_double:
-    .8byte          0x41f0000000000000
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: arm/op_float_to_int.S */
-/* File: arm/funop.S */
-    /*
-     * Generic 32-bit unary floating-point operation.  Provide an "instr"
-     * line that specifies an instruction that performs "s1 = op s0".
-     *
-     * for: int-to-float, float-to-int
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ftosizs s1, s0                              @ s1<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s1, [r9]                    @ vA<- s1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: arm/op_float_to_long.S */
-/* File: arm/unopWider.S */
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op r0", where
-     * "result" is a 64-bit quantity in r0/r1.
-     *
-     * For: int-to-long, int-to-double, float-to-long, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    GET_VREG r0, r3                     @ r0<- vB
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-                               @ optional op; may set condition codes
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    bl      f2l_doconv                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 9-10 instructions */
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: arm/op_float_to_double.S */
-/* File: arm/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op s0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    flds    s0, [r3]                    @ s0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    vcvt.f64.f32  d0, s0                              @ d0<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fstd    d0, [r9]                    @ vA<- d0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: arm/op_double_to_int.S */
-/* File: arm/funopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit unary floating point operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op d0".
-     *
-     * For: double-to-int, double-to-float
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    fldd    d0, [r3]                    @ d0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    ftosizd  s0, d0                              @ s0<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s0, [r9]                    @ vA<- s0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: arm/op_double_to_long.S */
-/* File: arm/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0/r1".
-     * This could be an ARM instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      d2l_doconv                              @ r0/r1<- op, r2-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-11 instructions */
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: arm/op_double_to_float.S */
-/* File: arm/funopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit unary floating point operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op d0".
-     *
-     * For: double-to-int, double-to-float
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    fldd    d0, [r3]                    @ d0<- vB
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    vcvt.f32.f64  s0, d0                              @ s0<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    fsts    s0, [r9]                    @ vA<- s0
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: arm/op_int_to_byte.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    sxtb    r0, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: arm/op_int_to_char.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    uxth    r0, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: arm/op_int_to_short.S */
-/* File: arm/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op r0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r3                     @ r0<- vB
-                               @ optional op; may set condition codes
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    sxth    r0, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: arm/op_add_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: arm/op_sub_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    sub     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: arm/op_mul_int.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: arm/op_div_int.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int
-     *
-     */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl    __aeabi_idiv                  @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: arm/op_rem_int.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int
-     *
-     */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls  r1, r1, r2, r0                 @ r1<- op, r0-r2 changed
-#else
-    bl   __aeabi_idivmod                @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: arm/op_and_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: arm/op_or_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: arm/op_xor_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: arm/op_shl_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: arm/op_shr_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: arm/op_ushr_int.S */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: arm/op_add_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    adds    r0, r0, r2                           @ optional op; may set condition codes
-    adc     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: arm/op_sub_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    subs    r0, r0, r2                           @ optional op; may set condition codes
-    sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: arm/op_mul_long.S */
-    /*
-     * Signed 64-bit integer multiply.
-     *
-     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
-     *        WX
-     *      x YZ
-     *  --------
-     *     ZW ZX
-     *  YW YX
-     *
-     * The low word of the result holds ZX, the high word holds
-     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
-     * it doesn't fit in the low 64 bits.
-     *
-     * Unlike most ARM math operations, multiply instructions have
-     * restrictions on using the same register more than once (Rd and Rm
-     * cannot be the same).
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    mul     ip, r2, r1                  @ ip<- ZxW
-    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
-    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    add     r2, r2, lr                  @ r2<- lr + low(ZxW + (YxX))
-    CLEAR_SHADOW_PAIR r0, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[AA]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r0, {r1-r2 }                @ vAA/vAA+1<- r1/r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: arm/op_div_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 1
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: arm/op_rem_long.S */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 1
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r2,r3}     @ vAA/vAA+1<- r2/r3
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: arm/op_and_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    and     r0, r0, r2                           @ optional op; may set condition codes
-    and     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: arm/op_or_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    orr     r0, r0, r2                           @ optional op; may set condition codes
-    orr     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: arm/op_xor_long.S */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    eor     r0, r0, r2                           @ optional op; may set condition codes
-    eor     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: arm/op_shl_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r1, r1, asl r2              @ r1<- r1 << r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r0, r0, asl r2              @ r0<- r0 << r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: arm/op_shr_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r0<- r0 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r1, r1, asr r2              @ r1<- r1 >> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: arm/op_ushr_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r3, r0, #255                @ r3<- BB
-    mov     r0, r0, lsr #8              @ r0<- CC
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BB]
-    GET_VREG r2, r0                     @ r2<- vCC
-    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    and     r2, r2, #63                 @ r0<- r0 & 0x3f
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: arm/op_add_float.S */
-/* File: arm/fbinop.S */
-    /*
-     * Generic 32-bit floating-point operation.  Provide an "instr" line that
-     * specifies an instruction that performs "s2 = s0 op s1".  Because we
-     * use the "softfp" ABI, this must be an instruction, not a function call.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    flds    s0, [r2]                    @ s0<- vBB
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fadds   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: arm/op_sub_float.S */
-/* File: arm/fbinop.S */
-    /*
-     * Generic 32-bit floating-point operation.  Provide an "instr" line that
-     * specifies an instruction that performs "s2 = s0 op s1".  Because we
-     * use the "softfp" ABI, this must be an instruction, not a function call.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    flds    s0, [r2]                    @ s0<- vBB
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fsubs   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: arm/op_mul_float.S */
-/* File: arm/fbinop.S */
-    /*
-     * Generic 32-bit floating-point operation.  Provide an "instr" line that
-     * specifies an instruction that performs "s2 = s0 op s1".  Because we
-     * use the "softfp" ABI, this must be an instruction, not a function call.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    flds    s0, [r2]                    @ s0<- vBB
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fmuls   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: arm/op_div_float.S */
-/* File: arm/fbinop.S */
-    /*
-     * Generic 32-bit floating-point operation.  Provide an "instr" line that
-     * specifies an instruction that performs "s2 = s0 op s1".  Because we
-     * use the "softfp" ABI, this must be an instruction, not a function call.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    flds    s1, [r3]                    @ s1<- vCC
-    flds    s0, [r2]                    @ s0<- vBB
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fdivs   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: arm/op_rem_float.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    GET_VREG r1, r3                     @ r1<- vCC
-    GET_VREG r0, r2                     @ r0<- vBB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      fmodf                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: arm/op_add_double.S */
-/* File: arm/fbinopWide.S */
-    /*
-     * Generic 64-bit double-precision floating point binary operation.
-     * Provide an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * for: add-double, sub-double, mul-double, div-double
-     */
-    /* doubleop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    faddd   d2, d0, d1                              @ s2<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: arm/op_sub_double.S */
-/* File: arm/fbinopWide.S */
-    /*
-     * Generic 64-bit double-precision floating point binary operation.
-     * Provide an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * for: add-double, sub-double, mul-double, div-double
-     */
-    /* doubleop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fsubd   d2, d0, d1                              @ s2<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: arm/op_mul_double.S */
-/* File: arm/fbinopWide.S */
-    /*
-     * Generic 64-bit double-precision floating point binary operation.
-     * Provide an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * for: add-double, sub-double, mul-double, div-double
-     */
-    /* doubleop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fmuld   d2, d0, d1                              @ s2<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: arm/op_div_double.S */
-/* File: arm/fbinopWide.S */
-    /*
-     * Generic 64-bit double-precision floating point binary operation.
-     * Provide an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * for: add-double, sub-double, mul-double, div-double
-     */
-    /* doubleop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    mov     r3, r0, lsr #8              @ r3<- CC
-    and     r2, r0, #255                @ r2<- BB
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
-    fldd    d1, [r3]                    @ d1<- vCC
-    fldd    d0, [r2]                    @ d0<- vBB
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    fdivd   d2, d0, d1                              @ s2<- op
-    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: arm/op_rem_double.S */
-/* EABI doesn't define a double remainder function, but libm does */
-/* File: arm/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH r0, 1                         @ r0<- CCBB
-    mov     rINST, rINST, lsr #8        @ rINST<- AA
-    and     r2, r0, #255                @ r2<- BB
-    mov     r3, r0, lsr #8              @ r3<- CC
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[AA]
-    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[BB]
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[CC]
-    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
-    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      fmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 14-17 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: arm/op_add_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: arm/op_sub_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    sub     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: arm/op_mul_int_2addr.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: arm/op_div_int_2addr.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/2addr
-     *
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: arm/op_rem_int_2addr.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/2addr
-     *
-     */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: arm/op_and_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: arm/op_or_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: arm/op_xor_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: arm/op_shl_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: arm/op_shr_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: arm/op_ushr_int_2addr.S */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-    and     r1, r1, #31                           @ optional op; may set condition codes
-    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: arm/op_add_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    adds    r0, r0, r2                           @ optional op; may set condition codes
-    adc     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: arm/op_sub_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    subs    r0, r0, r2                           @ optional op; may set condition codes
-    sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: arm/op_mul_long_2addr.S */
-    /*
-     * Signed 64-bit integer multiply, "/2addr" version.
-     *
-     * See op_mul_long for an explanation.
-     *
-     * We get a little tight on registers, so to avoid looking up &fp[A]
-     * again we stuff it into rINST.
-     */
-    /* mul-long/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR rINST, r9        @ rINST<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   rINST, {r0-r1}              @ r0/r1<- vAA/vAA+1
-    mul     ip, r2, r1                  @ ip<- ZxW
-    umull   r1, lr, r2, r0              @ r1/lr <- ZxX
-    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
-    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    add     r2, r2, lr                  @ r2<- r2 + low(ZxW + (YxX))
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r0, {r1-r2}                 @ vAA/vAA+1<- r1/r2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: arm/op_div_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 1
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: arm/op_rem_long_2addr.S */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 1
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r2,r3}     @ vAA/vAA+1<- r2/r3
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: arm/op_and_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    and     r0, r0, r2                           @ optional op; may set condition codes
-    and     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: arm/op_or_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    orr     r0, r0, r2                           @ optional op; may set condition codes
-    orr     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: arm/op_xor_long_2addr.S */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    eor     r0, r0, r2                           @ optional op; may set condition codes
-    eor     r1, r1, r3                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: arm/op_shl_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r1, r1, asl r2              @ r1<- r1 << r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
-    mov     r0, r0, asl r2              @ r0<- r0 << r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: arm/op_shr_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
-    mov     r1, r1, asr r2              @ r1<- r1 >> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm/op_ushr_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r2, r3                     @ r2<- vB
-    CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
-    and     r2, r2, #63                 @ r2<- r2 & 0x3f
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    mov     r0, r0, lsr r2              @ r0<- r2 >> r2
-    rsb     r3, r2, #32                 @ r3<- 32 - r2
-    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
-    subs    ip, r2, #32                 @ ip<- r2 - 32
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
-    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: arm/op_add_float_2addr.S */
-/* File: arm/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    flds    s1, [r3]                    @ s1<- vB
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    flds    s0, [r9]                    @ s0<- vA
-    fadds   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: arm/op_sub_float_2addr.S */
-/* File: arm/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    flds    s1, [r3]                    @ s1<- vB
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    flds    s0, [r9]                    @ s0<- vA
-    fsubs   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: arm/op_mul_float_2addr.S */
-/* File: arm/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    flds    s1, [r3]                    @ s1<- vB
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    flds    s0, [r9]                    @ s0<- vA
-    fmuls   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: arm/op_div_float_2addr.S */
-/* File: arm/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    flds    s1, [r3]                    @ s1<- vB
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    flds    s0, [r9]                    @ s0<- vA
-    fdivs   s2, s0, s1                              @ s2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fsts    s2, [r9]                    @ vAA<- s2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: arm/op_rem_float_2addr.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r1, r3                     @ r1<- vB
-    GET_VREG r0, r9                     @ r0<- vA
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
-                               @ optional op; may set condition codes
-    bl      fmodf                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: arm/op_add_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
-    fldd    d1, [r3]                    @ d1<- vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fldd    d0, [r9]                    @ d0<- vA
-    faddd   d2, d0, d1                              @ d2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: arm/op_sub_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
-    fldd    d1, [r3]                    @ d1<- vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fldd    d0, [r9]                    @ d0<- vA
-    fsubd   d2, d0, d1                              @ d2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: arm/op_mul_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
-    fldd    d1, [r3]                    @ d1<- vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fldd    d0, [r9]                    @ d0<- vA
-    fmuld   d2, d0, d1                              @ d2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: arm/op_div_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "d2 = d0 op d1".
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
-    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
-    fldd    d1, [r3]                    @ d1<- vB
-    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-    fldd    d0, [r9]                    @ d0<- vA
-    fdivd   d2, d0, d1                              @ d2<- op
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    fstd    d2, [r9]                    @ vAA<- d2
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: arm/op_rem_double_2addr.S */
-/* EABI doesn't define a double remainder function, but libm does */
-/* File: arm/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
-     *      rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
-    VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
-    VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
-    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
-    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
-    .if 0
-    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
-    beq     common_errDivideByZero
-    .endif
-    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
-    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-                               @ optional op; may set condition codes
-    bl      fmod                              @ result<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 12-15 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: arm/op_add_int_lit16.S */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: arm/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    rsb     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: arm/op_mul_int_lit16.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: arm/op_div_int_lit16.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/lit16
-     *
-     */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl       __aeabi_idiv               @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: arm/op_rem_int_lit16.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/lit16
-     *
-     */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl     __aeabi_idivmod              @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: arm/op_and_int_lit16.S */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: arm/op_or_int_lit16.S */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: arm/op_xor_int_lit16.S */
-/* File: arm/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
-    mov     r2, rINST, lsr #12          @ r2<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
-    GET_VREG r0, r2                     @ r0<- vB
-    .if 0
-    cmp     r1, #0                      @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: arm/op_add_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-                                @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    add     r0, r0, r3, asr #8                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm/op_rsub_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-                                @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    rsb     r0, r0, r3, asr #8                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: arm/op_mul_int_lit8.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    asr     r1, r3, #8                            @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: arm/op_div_int_lit8.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * div-int/lit8
-     *
-     */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r0, r0, r1                  @ r0<- op
-#else
-    bl   __aeabi_idiv                   @ r0<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                     @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: arm/op_rem_int_lit8.S */
-    /*
-     * Specialized 32-bit binary operation
-     *
-     * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
-     * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
-     * ARMv7 CPUs that have hardware division support).
-     *
-     * NOTE: idivmod returns quotient in r0 and remainder in r1
-     *
-     * rem-int/lit8
-     *
-     */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
-    sdiv    r2, r0, r1
-    mls     r1, r1, r2, r0              @ r1<- op
-#else
-    bl       __aeabi_idivmod            @ r1<- op, r0-r3 changed
-#endif
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r1, r9                     @ vAA<- r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: arm/op_and_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-                                @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    and     r0, r0, r3, asr #8                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: arm/op_or_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-                                @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    orr     r0, r0, r3, asr #8                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: arm/op_xor_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-                                @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    eor     r0, r0, r3, asr #8                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: arm/op_shl_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: arm/op_shr_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm/op_ushr_int_lit8.S */
-/* File: arm/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = r0 op r1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than r0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (r1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
-    mov     r9, rINST, lsr #8           @ r9<- AA
-    and     r2, r3, #255                @ r2<- BB
-    GET_VREG r0, r2                     @ r0<- vBB
-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
-    .if 0
-    @cmp     r1, #0                     @ is second operand zero?
-    beq     common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
-    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    SET_VREG r0, r9                @ vAA<- r0
-    GOTO_OPCODE ip                      @ jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: arm/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldr   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: arm/op_iget_wide_quick.S */
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH ip, 1                         @ ip<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    VREG_INDEX_TO_ADDR r3, r2           @ r3<- &fp[A]
-    CLEAR_SHADOW_PAIR r2, ip, lr        @ Zero out the shadow regs
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: arm/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    EXPORT_PC
-    GET_VREG r0, r2                     @ r0<- object we're operating on
-    bl      artIGetObjectFromMterp      @ (obj, offset)
-    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    PREFETCH_INST 2
-    cmp     r3, #0
-    bne     MterpPossibleException      @ bail out
-    SET_VREG_OBJECT r0, r2              @ fp[A]<- r0
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: arm/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    str     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: arm/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r3, 1                         @ r3<- field byte offset
-    GET_VREG r2, r2                     @ r2<- fp[B], the object pointer
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    cmp     r2, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[A]
-    ldmia   r0, {r0-r1}                 @ r0/r1<- fp[A]/fp[A+1]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strd    r0, [r2, r3]                @ obj.field<- r0/r1
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: arm/op_iput_object_quick.S */
-    EXPORT_PC
-    add     r0, rFP, #OFF_FP_SHADOWFRAME
-    mov     r1, rPC
-    mov     r2, rINST
-    bl      MterpIputObjectQuick
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm/op_invoke_virtual_quick.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeVirtualQuick
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm/op_invoke_virtual_range_quick.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeVirtualQuickRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: arm/op_iput_boolean_quick.S */
-/* File: arm/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strb     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: arm/op_iput_byte_quick.S */
-/* File: arm/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strb     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: arm/op_iput_char_quick.S */
-/* File: arm/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strh     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: arm/op_iput_short_quick.S */
-/* File: arm/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    GET_VREG r0, r2                     @ r0<- fp[A]
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    strh     r0, [r3, r1]             @ obj.field<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: arm/op_iget_boolean_quick.S */
-/* File: arm/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrb   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: arm/op_iget_byte_quick.S */
-/* File: arm/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrsb   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: arm/op_iget_char_quick.S */
-/* File: arm/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrh   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: arm/op_iget_short_quick.S */
-/* File: arm/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    mov     r2, rINST, lsr #12          @ r2<- B
-    FETCH r1, 1                         @ r1<- field byte offset
-    GET_VREG r3, r2                     @ r3<- object we're operating on
-    ubfx    r2, rINST, #8, #4           @ r2<- A
-    cmp     r3, #0                      @ check object for null
-    beq     common_errNullObject        @ object was null
-    ldrsh   r0, [r3, r1]                @ r0<- obj.field
-    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    SET_VREG r0, r2                     @ fp[A]<- r0
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: arm/op_unused_f3.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: arm/op_unused_f4.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: arm/op_unused_f5.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: arm/op_unused_f6.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: arm/op_unused_f7.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: arm/op_unused_f8.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: arm/op_unused_f9.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: arm/op_invoke_polymorphic.S */
-/* File: arm/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokePolymorphic
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm/op_invoke_polymorphic_range.S */
-/* File: arm/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokePolymorphicRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: arm/op_invoke_custom.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeCustom
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle an invoke-custom invocation.
-     *
-     * for: invoke-custom, invoke-custom/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: arm/op_invoke_custom_range.S */
-/* File: arm/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    EXPORT_PC
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rPC
-    mov     r3, rINST
-    bl      MterpInvokeCustomRange
-    cmp     r0, #0
-    beq     MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: arm/op_const_method_handle.S */
-/* File: arm/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC
-    FETCH   r0, 1                       @ r0<- BBBB
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstMethodHandle                     @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     @ load rINST
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: arm/op_const_method_type.S */
-/* File: arm/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC
-    FETCH   r0, 1                       @ r0<- BBBB
-    mov     r1, rINST, lsr #8           @ r1<- AA
-    add     r2, rFP, #OFF_FP_SHADOWFRAME
-    mov     r3, rSELF
-    bl      MterpConstMethodType                     @ (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     @ load rINST
-    cmp     r0, #0                      @ fail?
-    bne     MterpPossibleException      @ let reference interpreter deal with it.
-    ADVANCE 2                           @ advance rPC
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-
-    .balign 128
-/* File: arm/instruction_end.S */
-
-    .type artMterpAsmInstructionEnd, #object
-    .hidden artMterpAsmInstructionEnd
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: arm/instruction_start_sister.S */
-
-    .type artMterpAsmSisterStart, #object
-    .hidden artMterpAsmSisterStart
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
-
-
-/* continuation for op_float_to_long */
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
-    ubfx    r2, r0, #23, #8             @ grab the exponent
-    cmp     r2, #0xbe                   @ MININT < x > MAXINT?
-    bhs     f2l_special_cases
-    b       __aeabi_f2lz                @ tail call to convert float to long
-f2l_special_cases:
-    cmp     r2, #0xff                   @ NaN or infinity?
-    beq     f2l_maybeNaN
-f2l_notNaN:
-    adds    r0, r0, r0                  @ sign bit to carry
-    mov     r0, #0xffffffff             @ assume maxlong for lsw
-    mov     r1, #0x7fffffff             @ assume maxlong for msw
-    adc     r0, r0, #0
-    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
-    bx      lr                          @ return
-f2l_maybeNaN:
-    lsls    r3, r0, #9
-    beq     f2l_notNaN                  @ if fraction is non-zero, it's a NaN
-    mov     r0, #0
-    mov     r1, #0
-    bx      lr                          @ return 0 for NaN
-
-/* continuation for op_double_to_long */
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
-    ubfx    r2, r1, #20, #11            @ grab the exponent
-    movw    r3, #0x43e
-    cmp     r2, r3                      @ MINLONG < x > MAXLONG?
-    bhs     d2l_special_cases
-    b       __aeabi_d2lz                @ tail call to convert double to long
-d2l_special_cases:
-    movw    r3, #0x7ff
-    cmp     r2, r3
-    beq     d2l_maybeNaN                @ NaN?
-d2l_notNaN:
-    adds    r1, r1, r1                  @ sign bit to carry
-    mov     r0, #0xffffffff             @ assume maxlong for lsw
-    mov     r1, #0x7fffffff             @ assume maxlong for msw
-    adc     r0, r0, #0
-    adc     r1, r1, #0                  @ convert maxlong to minlong if exp negative
-    bx      lr                          @ return
-d2l_maybeNaN:
-    orrs    r3, r0, r1, lsl #12
-    beq     d2l_notNaN                  @ if fraction is non-zero, it's a NaN
-    mov     r0, #0
-    mov     r1, #0
-    bx      lr                          @ return 0 for NaN
-/* File: arm/instruction_end_sister.S */
-
-    .type artMterpAsmSisterEnd, #object
-    .hidden artMterpAsmSisterEnd
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: arm/instruction_start_alt.S */
-
-    .type artMterpAsmAltInstructionStart, #object
-    .hidden artMterpAsmAltInstructionStart
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_nop
-    sub    lr, lr, #(.L_ALT_op_nop - .L_op_nop)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move
-    sub    lr, lr, #(.L_ALT_op_move - .L_op_move)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_from16
-    sub    lr, lr, #(.L_ALT_op_move_from16 - .L_op_move_from16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_16
-    sub    lr, lr, #(.L_ALT_op_move_16 - .L_op_move_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_wide
-    sub    lr, lr, #(.L_ALT_op_move_wide - .L_op_move_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_wide_from16
-    sub    lr, lr, #(.L_ALT_op_move_wide_from16 - .L_op_move_wide_from16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_wide_16
-    sub    lr, lr, #(.L_ALT_op_move_wide_16 - .L_op_move_wide_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_object
-    sub    lr, lr, #(.L_ALT_op_move_object - .L_op_move_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_object_from16
-    sub    lr, lr, #(.L_ALT_op_move_object_from16 - .L_op_move_object_from16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_object_16
-    sub    lr, lr, #(.L_ALT_op_move_object_16 - .L_op_move_object_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_result
-    sub    lr, lr, #(.L_ALT_op_move_result - .L_op_move_result)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_result_wide
-    sub    lr, lr, #(.L_ALT_op_move_result_wide - .L_op_move_result_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_result_object
-    sub    lr, lr, #(.L_ALT_op_move_result_object - .L_op_move_result_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_move_exception
-    sub    lr, lr, #(.L_ALT_op_move_exception - .L_op_move_exception)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_return_void
-    sub    lr, lr, #(.L_ALT_op_return_void - .L_op_return_void)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_return
-    sub    lr, lr, #(.L_ALT_op_return - .L_op_return)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_return_wide
-    sub    lr, lr, #(.L_ALT_op_return_wide - .L_op_return_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_return_object
-    sub    lr, lr, #(.L_ALT_op_return_object - .L_op_return_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_4
-    sub    lr, lr, #(.L_ALT_op_const_4 - .L_op_const_4)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_16
-    sub    lr, lr, #(.L_ALT_op_const_16 - .L_op_const_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const
-    sub    lr, lr, #(.L_ALT_op_const - .L_op_const)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_high16
-    sub    lr, lr, #(.L_ALT_op_const_high16 - .L_op_const_high16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_wide_16
-    sub    lr, lr, #(.L_ALT_op_const_wide_16 - .L_op_const_wide_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_wide_32
-    sub    lr, lr, #(.L_ALT_op_const_wide_32 - .L_op_const_wide_32)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_wide
-    sub    lr, lr, #(.L_ALT_op_const_wide - .L_op_const_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_wide_high16
-    sub    lr, lr, #(.L_ALT_op_const_wide_high16 - .L_op_const_wide_high16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_string
-    sub    lr, lr, #(.L_ALT_op_const_string - .L_op_const_string)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_string_jumbo
-    sub    lr, lr, #(.L_ALT_op_const_string_jumbo - .L_op_const_string_jumbo)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_class
-    sub    lr, lr, #(.L_ALT_op_const_class - .L_op_const_class)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_monitor_enter
-    sub    lr, lr, #(.L_ALT_op_monitor_enter - .L_op_monitor_enter)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_monitor_exit
-    sub    lr, lr, #(.L_ALT_op_monitor_exit - .L_op_monitor_exit)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_check_cast
-    sub    lr, lr, #(.L_ALT_op_check_cast - .L_op_check_cast)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_instance_of
-    sub    lr, lr, #(.L_ALT_op_instance_of - .L_op_instance_of)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_array_length
-    sub    lr, lr, #(.L_ALT_op_array_length - .L_op_array_length)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_new_instance
-    sub    lr, lr, #(.L_ALT_op_new_instance - .L_op_new_instance)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_new_array
-    sub    lr, lr, #(.L_ALT_op_new_array - .L_op_new_array)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_filled_new_array
-    sub    lr, lr, #(.L_ALT_op_filled_new_array - .L_op_filled_new_array)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_filled_new_array_range
-    sub    lr, lr, #(.L_ALT_op_filled_new_array_range - .L_op_filled_new_array_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_fill_array_data
-    sub    lr, lr, #(.L_ALT_op_fill_array_data - .L_op_fill_array_data)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_throw
-    sub    lr, lr, #(.L_ALT_op_throw - .L_op_throw)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_goto
-    sub    lr, lr, #(.L_ALT_op_goto - .L_op_goto)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_goto_16
-    sub    lr, lr, #(.L_ALT_op_goto_16 - .L_op_goto_16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_goto_32
-    sub    lr, lr, #(.L_ALT_op_goto_32 - .L_op_goto_32)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_packed_switch
-    sub    lr, lr, #(.L_ALT_op_packed_switch - .L_op_packed_switch)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sparse_switch
-    sub    lr, lr, #(.L_ALT_op_sparse_switch - .L_op_sparse_switch)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_cmpl_float
-    sub    lr, lr, #(.L_ALT_op_cmpl_float - .L_op_cmpl_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_cmpg_float
-    sub    lr, lr, #(.L_ALT_op_cmpg_float - .L_op_cmpg_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_cmpl_double
-    sub    lr, lr, #(.L_ALT_op_cmpl_double - .L_op_cmpl_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_cmpg_double
-    sub    lr, lr, #(.L_ALT_op_cmpg_double - .L_op_cmpg_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_cmp_long
-    sub    lr, lr, #(.L_ALT_op_cmp_long - .L_op_cmp_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_eq
-    sub    lr, lr, #(.L_ALT_op_if_eq - .L_op_if_eq)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_ne
-    sub    lr, lr, #(.L_ALT_op_if_ne - .L_op_if_ne)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_lt
-    sub    lr, lr, #(.L_ALT_op_if_lt - .L_op_if_lt)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_ge
-    sub    lr, lr, #(.L_ALT_op_if_ge - .L_op_if_ge)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_gt
-    sub    lr, lr, #(.L_ALT_op_if_gt - .L_op_if_gt)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_le
-    sub    lr, lr, #(.L_ALT_op_if_le - .L_op_if_le)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_eqz
-    sub    lr, lr, #(.L_ALT_op_if_eqz - .L_op_if_eqz)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_nez
-    sub    lr, lr, #(.L_ALT_op_if_nez - .L_op_if_nez)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_ltz
-    sub    lr, lr, #(.L_ALT_op_if_ltz - .L_op_if_ltz)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_gez
-    sub    lr, lr, #(.L_ALT_op_if_gez - .L_op_if_gez)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_gtz
-    sub    lr, lr, #(.L_ALT_op_if_gtz - .L_op_if_gtz)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_if_lez
-    sub    lr, lr, #(.L_ALT_op_if_lez - .L_op_if_lez)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_3e
-    sub    lr, lr, #(.L_ALT_op_unused_3e - .L_op_unused_3e)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_3f
-    sub    lr, lr, #(.L_ALT_op_unused_3f - .L_op_unused_3f)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_40
-    sub    lr, lr, #(.L_ALT_op_unused_40 - .L_op_unused_40)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_41
-    sub    lr, lr, #(.L_ALT_op_unused_41 - .L_op_unused_41)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_42
-    sub    lr, lr, #(.L_ALT_op_unused_42 - .L_op_unused_42)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_43
-    sub    lr, lr, #(.L_ALT_op_unused_43 - .L_op_unused_43)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget
-    sub    lr, lr, #(.L_ALT_op_aget - .L_op_aget)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_wide
-    sub    lr, lr, #(.L_ALT_op_aget_wide - .L_op_aget_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_object
-    sub    lr, lr, #(.L_ALT_op_aget_object - .L_op_aget_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_boolean
-    sub    lr, lr, #(.L_ALT_op_aget_boolean - .L_op_aget_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_byte
-    sub    lr, lr, #(.L_ALT_op_aget_byte - .L_op_aget_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_char
-    sub    lr, lr, #(.L_ALT_op_aget_char - .L_op_aget_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aget_short
-    sub    lr, lr, #(.L_ALT_op_aget_short - .L_op_aget_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput
-    sub    lr, lr, #(.L_ALT_op_aput - .L_op_aput)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_wide
-    sub    lr, lr, #(.L_ALT_op_aput_wide - .L_op_aput_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_object
-    sub    lr, lr, #(.L_ALT_op_aput_object - .L_op_aput_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_boolean
-    sub    lr, lr, #(.L_ALT_op_aput_boolean - .L_op_aput_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_byte
-    sub    lr, lr, #(.L_ALT_op_aput_byte - .L_op_aput_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_char
-    sub    lr, lr, #(.L_ALT_op_aput_char - .L_op_aput_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_aput_short
-    sub    lr, lr, #(.L_ALT_op_aput_short - .L_op_aput_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget
-    sub    lr, lr, #(.L_ALT_op_iget - .L_op_iget)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_wide
-    sub    lr, lr, #(.L_ALT_op_iget_wide - .L_op_iget_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_object
-    sub    lr, lr, #(.L_ALT_op_iget_object - .L_op_iget_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_boolean
-    sub    lr, lr, #(.L_ALT_op_iget_boolean - .L_op_iget_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_byte
-    sub    lr, lr, #(.L_ALT_op_iget_byte - .L_op_iget_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_char
-    sub    lr, lr, #(.L_ALT_op_iget_char - .L_op_iget_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_short
-    sub    lr, lr, #(.L_ALT_op_iget_short - .L_op_iget_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput
-    sub    lr, lr, #(.L_ALT_op_iput - .L_op_iput)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_wide
-    sub    lr, lr, #(.L_ALT_op_iput_wide - .L_op_iput_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_object
-    sub    lr, lr, #(.L_ALT_op_iput_object - .L_op_iput_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_boolean
-    sub    lr, lr, #(.L_ALT_op_iput_boolean - .L_op_iput_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_byte
-    sub    lr, lr, #(.L_ALT_op_iput_byte - .L_op_iput_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_char
-    sub    lr, lr, #(.L_ALT_op_iput_char - .L_op_iput_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_short
-    sub    lr, lr, #(.L_ALT_op_iput_short - .L_op_iput_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget
-    sub    lr, lr, #(.L_ALT_op_sget - .L_op_sget)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_wide
-    sub    lr, lr, #(.L_ALT_op_sget_wide - .L_op_sget_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_object
-    sub    lr, lr, #(.L_ALT_op_sget_object - .L_op_sget_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_boolean
-    sub    lr, lr, #(.L_ALT_op_sget_boolean - .L_op_sget_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_byte
-    sub    lr, lr, #(.L_ALT_op_sget_byte - .L_op_sget_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_char
-    sub    lr, lr, #(.L_ALT_op_sget_char - .L_op_sget_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sget_short
-    sub    lr, lr, #(.L_ALT_op_sget_short - .L_op_sget_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput
-    sub    lr, lr, #(.L_ALT_op_sput - .L_op_sput)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_wide
-    sub    lr, lr, #(.L_ALT_op_sput_wide - .L_op_sput_wide)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_object
-    sub    lr, lr, #(.L_ALT_op_sput_object - .L_op_sput_object)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_boolean
-    sub    lr, lr, #(.L_ALT_op_sput_boolean - .L_op_sput_boolean)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_byte
-    sub    lr, lr, #(.L_ALT_op_sput_byte - .L_op_sput_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_char
-    sub    lr, lr, #(.L_ALT_op_sput_char - .L_op_sput_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sput_short
-    sub    lr, lr, #(.L_ALT_op_sput_short - .L_op_sput_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_virtual
-    sub    lr, lr, #(.L_ALT_op_invoke_virtual - .L_op_invoke_virtual)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_super
-    sub    lr, lr, #(.L_ALT_op_invoke_super - .L_op_invoke_super)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_direct
-    sub    lr, lr, #(.L_ALT_op_invoke_direct - .L_op_invoke_direct)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_static
-    sub    lr, lr, #(.L_ALT_op_invoke_static - .L_op_invoke_static)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_interface
-    sub    lr, lr, #(.L_ALT_op_invoke_interface - .L_op_invoke_interface)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_return_void_no_barrier
-    sub    lr, lr, #(.L_ALT_op_return_void_no_barrier - .L_op_return_void_no_barrier)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_virtual_range
-    sub    lr, lr, #(.L_ALT_op_invoke_virtual_range - .L_op_invoke_virtual_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_super_range
-    sub    lr, lr, #(.L_ALT_op_invoke_super_range - .L_op_invoke_super_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_direct_range
-    sub    lr, lr, #(.L_ALT_op_invoke_direct_range - .L_op_invoke_direct_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_static_range
-    sub    lr, lr, #(.L_ALT_op_invoke_static_range - .L_op_invoke_static_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_interface_range
-    sub    lr, lr, #(.L_ALT_op_invoke_interface_range - .L_op_invoke_interface_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_79
-    sub    lr, lr, #(.L_ALT_op_unused_79 - .L_op_unused_79)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_7a
-    sub    lr, lr, #(.L_ALT_op_unused_7a - .L_op_unused_7a)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_neg_int
-    sub    lr, lr, #(.L_ALT_op_neg_int - .L_op_neg_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_not_int
-    sub    lr, lr, #(.L_ALT_op_not_int - .L_op_not_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_neg_long
-    sub    lr, lr, #(.L_ALT_op_neg_long - .L_op_neg_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_not_long
-    sub    lr, lr, #(.L_ALT_op_not_long - .L_op_not_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_neg_float
-    sub    lr, lr, #(.L_ALT_op_neg_float - .L_op_neg_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_neg_double
-    sub    lr, lr, #(.L_ALT_op_neg_double - .L_op_neg_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_long
-    sub    lr, lr, #(.L_ALT_op_int_to_long - .L_op_int_to_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_float
-    sub    lr, lr, #(.L_ALT_op_int_to_float - .L_op_int_to_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_double
-    sub    lr, lr, #(.L_ALT_op_int_to_double - .L_op_int_to_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_long_to_int
-    sub    lr, lr, #(.L_ALT_op_long_to_int - .L_op_long_to_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_long_to_float
-    sub    lr, lr, #(.L_ALT_op_long_to_float - .L_op_long_to_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_long_to_double
-    sub    lr, lr, #(.L_ALT_op_long_to_double - .L_op_long_to_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_float_to_int
-    sub    lr, lr, #(.L_ALT_op_float_to_int - .L_op_float_to_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_float_to_long
-    sub    lr, lr, #(.L_ALT_op_float_to_long - .L_op_float_to_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_float_to_double
-    sub    lr, lr, #(.L_ALT_op_float_to_double - .L_op_float_to_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_double_to_int
-    sub    lr, lr, #(.L_ALT_op_double_to_int - .L_op_double_to_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_double_to_long
-    sub    lr, lr, #(.L_ALT_op_double_to_long - .L_op_double_to_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_double_to_float
-    sub    lr, lr, #(.L_ALT_op_double_to_float - .L_op_double_to_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_byte
-    sub    lr, lr, #(.L_ALT_op_int_to_byte - .L_op_int_to_byte)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_char
-    sub    lr, lr, #(.L_ALT_op_int_to_char - .L_op_int_to_char)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_int_to_short
-    sub    lr, lr, #(.L_ALT_op_int_to_short - .L_op_int_to_short)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_int
-    sub    lr, lr, #(.L_ALT_op_add_int - .L_op_add_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_int
-    sub    lr, lr, #(.L_ALT_op_sub_int - .L_op_sub_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_int
-    sub    lr, lr, #(.L_ALT_op_mul_int - .L_op_mul_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_int
-    sub    lr, lr, #(.L_ALT_op_div_int - .L_op_div_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_int
-    sub    lr, lr, #(.L_ALT_op_rem_int - .L_op_rem_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_int
-    sub    lr, lr, #(.L_ALT_op_and_int - .L_op_and_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_int
-    sub    lr, lr, #(.L_ALT_op_or_int - .L_op_or_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_int
-    sub    lr, lr, #(.L_ALT_op_xor_int - .L_op_xor_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shl_int
-    sub    lr, lr, #(.L_ALT_op_shl_int - .L_op_shl_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shr_int
-    sub    lr, lr, #(.L_ALT_op_shr_int - .L_op_shr_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_ushr_int
-    sub    lr, lr, #(.L_ALT_op_ushr_int - .L_op_ushr_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_long
-    sub    lr, lr, #(.L_ALT_op_add_long - .L_op_add_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_long
-    sub    lr, lr, #(.L_ALT_op_sub_long - .L_op_sub_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_long
-    sub    lr, lr, #(.L_ALT_op_mul_long - .L_op_mul_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_long
-    sub    lr, lr, #(.L_ALT_op_div_long - .L_op_div_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_long
-    sub    lr, lr, #(.L_ALT_op_rem_long - .L_op_rem_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_long
-    sub    lr, lr, #(.L_ALT_op_and_long - .L_op_and_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_long
-    sub    lr, lr, #(.L_ALT_op_or_long - .L_op_or_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_long
-    sub    lr, lr, #(.L_ALT_op_xor_long - .L_op_xor_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shl_long
-    sub    lr, lr, #(.L_ALT_op_shl_long - .L_op_shl_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shr_long
-    sub    lr, lr, #(.L_ALT_op_shr_long - .L_op_shr_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_ushr_long
-    sub    lr, lr, #(.L_ALT_op_ushr_long - .L_op_ushr_long)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_float
-    sub    lr, lr, #(.L_ALT_op_add_float - .L_op_add_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_float
-    sub    lr, lr, #(.L_ALT_op_sub_float - .L_op_sub_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_float
-    sub    lr, lr, #(.L_ALT_op_mul_float - .L_op_mul_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_float
-    sub    lr, lr, #(.L_ALT_op_div_float - .L_op_div_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_float
-    sub    lr, lr, #(.L_ALT_op_rem_float - .L_op_rem_float)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_double
-    sub    lr, lr, #(.L_ALT_op_add_double - .L_op_add_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_double
-    sub    lr, lr, #(.L_ALT_op_sub_double - .L_op_sub_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_double
-    sub    lr, lr, #(.L_ALT_op_mul_double - .L_op_mul_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_double
-    sub    lr, lr, #(.L_ALT_op_div_double - .L_op_div_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_double
-    sub    lr, lr, #(.L_ALT_op_rem_double - .L_op_rem_double)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_int_2addr
-    sub    lr, lr, #(.L_ALT_op_add_int_2addr - .L_op_add_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_int_2addr
-    sub    lr, lr, #(.L_ALT_op_sub_int_2addr - .L_op_sub_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_int_2addr
-    sub    lr, lr, #(.L_ALT_op_mul_int_2addr - .L_op_mul_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_int_2addr
-    sub    lr, lr, #(.L_ALT_op_div_int_2addr - .L_op_div_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_int_2addr
-    sub    lr, lr, #(.L_ALT_op_rem_int_2addr - .L_op_rem_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_int_2addr
-    sub    lr, lr, #(.L_ALT_op_and_int_2addr - .L_op_and_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_int_2addr
-    sub    lr, lr, #(.L_ALT_op_or_int_2addr - .L_op_or_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_int_2addr
-    sub    lr, lr, #(.L_ALT_op_xor_int_2addr - .L_op_xor_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shl_int_2addr
-    sub    lr, lr, #(.L_ALT_op_shl_int_2addr - .L_op_shl_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shr_int_2addr
-    sub    lr, lr, #(.L_ALT_op_shr_int_2addr - .L_op_shr_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_ushr_int_2addr
-    sub    lr, lr, #(.L_ALT_op_ushr_int_2addr - .L_op_ushr_int_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_long_2addr
-    sub    lr, lr, #(.L_ALT_op_add_long_2addr - .L_op_add_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_long_2addr
-    sub    lr, lr, #(.L_ALT_op_sub_long_2addr - .L_op_sub_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_long_2addr
-    sub    lr, lr, #(.L_ALT_op_mul_long_2addr - .L_op_mul_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_long_2addr
-    sub    lr, lr, #(.L_ALT_op_div_long_2addr - .L_op_div_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_long_2addr
-    sub    lr, lr, #(.L_ALT_op_rem_long_2addr - .L_op_rem_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_long_2addr
-    sub    lr, lr, #(.L_ALT_op_and_long_2addr - .L_op_and_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_long_2addr
-    sub    lr, lr, #(.L_ALT_op_or_long_2addr - .L_op_or_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_long_2addr
-    sub    lr, lr, #(.L_ALT_op_xor_long_2addr - .L_op_xor_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shl_long_2addr
-    sub    lr, lr, #(.L_ALT_op_shl_long_2addr - .L_op_shl_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shr_long_2addr
-    sub    lr, lr, #(.L_ALT_op_shr_long_2addr - .L_op_shr_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_ushr_long_2addr
-    sub    lr, lr, #(.L_ALT_op_ushr_long_2addr - .L_op_ushr_long_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_float_2addr
-    sub    lr, lr, #(.L_ALT_op_add_float_2addr - .L_op_add_float_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_float_2addr
-    sub    lr, lr, #(.L_ALT_op_sub_float_2addr - .L_op_sub_float_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_float_2addr
-    sub    lr, lr, #(.L_ALT_op_mul_float_2addr - .L_op_mul_float_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_float_2addr
-    sub    lr, lr, #(.L_ALT_op_div_float_2addr - .L_op_div_float_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_float_2addr
-    sub    lr, lr, #(.L_ALT_op_rem_float_2addr - .L_op_rem_float_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_double_2addr
-    sub    lr, lr, #(.L_ALT_op_add_double_2addr - .L_op_add_double_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_sub_double_2addr
-    sub    lr, lr, #(.L_ALT_op_sub_double_2addr - .L_op_sub_double_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_double_2addr
-    sub    lr, lr, #(.L_ALT_op_mul_double_2addr - .L_op_mul_double_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_double_2addr
-    sub    lr, lr, #(.L_ALT_op_div_double_2addr - .L_op_div_double_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_double_2addr
-    sub    lr, lr, #(.L_ALT_op_rem_double_2addr - .L_op_rem_double_2addr)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_int_lit16
-    sub    lr, lr, #(.L_ALT_op_add_int_lit16 - .L_op_add_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rsub_int
-    sub    lr, lr, #(.L_ALT_op_rsub_int - .L_op_rsub_int)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_int_lit16
-    sub    lr, lr, #(.L_ALT_op_mul_int_lit16 - .L_op_mul_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_int_lit16
-    sub    lr, lr, #(.L_ALT_op_div_int_lit16 - .L_op_div_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_int_lit16
-    sub    lr, lr, #(.L_ALT_op_rem_int_lit16 - .L_op_rem_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_int_lit16
-    sub    lr, lr, #(.L_ALT_op_and_int_lit16 - .L_op_and_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_int_lit16
-    sub    lr, lr, #(.L_ALT_op_or_int_lit16 - .L_op_or_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_int_lit16
-    sub    lr, lr, #(.L_ALT_op_xor_int_lit16 - .L_op_xor_int_lit16)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_add_int_lit8
-    sub    lr, lr, #(.L_ALT_op_add_int_lit8 - .L_op_add_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rsub_int_lit8
-    sub    lr, lr, #(.L_ALT_op_rsub_int_lit8 - .L_op_rsub_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_mul_int_lit8
-    sub    lr, lr, #(.L_ALT_op_mul_int_lit8 - .L_op_mul_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_div_int_lit8
-    sub    lr, lr, #(.L_ALT_op_div_int_lit8 - .L_op_div_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_rem_int_lit8
-    sub    lr, lr, #(.L_ALT_op_rem_int_lit8 - .L_op_rem_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_and_int_lit8
-    sub    lr, lr, #(.L_ALT_op_and_int_lit8 - .L_op_and_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_or_int_lit8
-    sub    lr, lr, #(.L_ALT_op_or_int_lit8 - .L_op_or_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_xor_int_lit8
-    sub    lr, lr, #(.L_ALT_op_xor_int_lit8 - .L_op_xor_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shl_int_lit8
-    sub    lr, lr, #(.L_ALT_op_shl_int_lit8 - .L_op_shl_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_shr_int_lit8
-    sub    lr, lr, #(.L_ALT_op_shr_int_lit8 - .L_op_shr_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_ushr_int_lit8
-    sub    lr, lr, #(.L_ALT_op_ushr_int_lit8 - .L_op_ushr_int_lit8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_quick
-    sub    lr, lr, #(.L_ALT_op_iget_quick - .L_op_iget_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_wide_quick
-    sub    lr, lr, #(.L_ALT_op_iget_wide_quick - .L_op_iget_wide_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_object_quick
-    sub    lr, lr, #(.L_ALT_op_iget_object_quick - .L_op_iget_object_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_quick
-    sub    lr, lr, #(.L_ALT_op_iput_quick - .L_op_iput_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_wide_quick
-    sub    lr, lr, #(.L_ALT_op_iput_wide_quick - .L_op_iput_wide_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_object_quick
-    sub    lr, lr, #(.L_ALT_op_iput_object_quick - .L_op_iput_object_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_virtual_quick
-    sub    lr, lr, #(.L_ALT_op_invoke_virtual_quick - .L_op_invoke_virtual_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_virtual_range_quick
-    sub    lr, lr, #(.L_ALT_op_invoke_virtual_range_quick - .L_op_invoke_virtual_range_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_boolean_quick
-    sub    lr, lr, #(.L_ALT_op_iput_boolean_quick - .L_op_iput_boolean_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_byte_quick
-    sub    lr, lr, #(.L_ALT_op_iput_byte_quick - .L_op_iput_byte_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_char_quick
-    sub    lr, lr, #(.L_ALT_op_iput_char_quick - .L_op_iput_char_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iput_short_quick
-    sub    lr, lr, #(.L_ALT_op_iput_short_quick - .L_op_iput_short_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_boolean_quick
-    sub    lr, lr, #(.L_ALT_op_iget_boolean_quick - .L_op_iget_boolean_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_byte_quick
-    sub    lr, lr, #(.L_ALT_op_iget_byte_quick - .L_op_iget_byte_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_char_quick
-    sub    lr, lr, #(.L_ALT_op_iget_char_quick - .L_op_iget_char_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_iget_short_quick
-    sub    lr, lr, #(.L_ALT_op_iget_short_quick - .L_op_iget_short_quick)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f3
-    sub    lr, lr, #(.L_ALT_op_unused_f3 - .L_op_unused_f3)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f4
-    sub    lr, lr, #(.L_ALT_op_unused_f4 - .L_op_unused_f4)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f5
-    sub    lr, lr, #(.L_ALT_op_unused_f5 - .L_op_unused_f5)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f6
-    sub    lr, lr, #(.L_ALT_op_unused_f6 - .L_op_unused_f6)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f7
-    sub    lr, lr, #(.L_ALT_op_unused_f7 - .L_op_unused_f7)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f8
-    sub    lr, lr, #(.L_ALT_op_unused_f8 - .L_op_unused_f8)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_unused_f9
-    sub    lr, lr, #(.L_ALT_op_unused_f9 - .L_op_unused_f9)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_polymorphic
-    sub    lr, lr, #(.L_ALT_op_invoke_polymorphic - .L_op_invoke_polymorphic)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_polymorphic_range
-    sub    lr, lr, #(.L_ALT_op_invoke_polymorphic_range - .L_op_invoke_polymorphic_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_custom
-    sub    lr, lr, #(.L_ALT_op_invoke_custom - .L_op_invoke_custom)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_invoke_custom_range
-    sub    lr, lr, #(.L_ALT_op_invoke_custom_range - .L_op_invoke_custom_range)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_method_handle
-    sub    lr, lr, #(.L_ALT_op_const_method_handle - .L_op_const_method_handle)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]            @ refresh IBASE.
-    adr    lr, .L_ALT_op_const_method_type
-    sub    lr, lr, #(.L_ALT_op_const_method_type - .L_op_const_method_type)               @ Addr of primary handler.
-    mov    r0, rSELF
-    add    r1, rFP, #OFF_FP_SHADOWFRAME
-    mov    r2, rPC
-    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.
-
-    .balign 128
-/* File: arm/instruction_end_alt.S */
-
-    .type artMterpAsmAltInstructionEnd, #object
-    .hidden artMterpAsmAltInstructionEnd
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: arm/footer.S */
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogDivideByZeroException
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogArrayIndexException
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNegativeArraySizeException
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNoSuchMethodException
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNullObjectException
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogExceptionThrownException
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    ldr  r2, [rSELF, #THREAD_FLAGS_OFFSET]
-    bl MterpLogSuspendFallback
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ldr     r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
-    cmp     r0, #0                                  @ Exception pending?
-    beq     MterpFallback                           @ If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    bl      MterpHandleException                    @ (self, shadow_frame)
-    cmp     r0, #0
-    beq     MterpExceptionReturn                    @ no local catch, back to caller.
-    ldr     r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
-    ldr     r1, [rFP, #OFF_FP_DEX_PC]
-    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-    add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cmp     r0, #0
-    bne     MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    cmp     rINST, #0
-MterpCommonTakenBranch:
-    bgt     .L_forward_branch           @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmp     rPROFILE, #JIT_CHECK_OSR
-    beq     .L_osr_check
-    subsgt  rPROFILE, #1
-    beq     .L_add_batch                @ counted down to zero - report
-.L_resume_backward_branch:
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    REFRESH_IBASE
-    add     r2, rINST, rINST            @ r2<- byte offset
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bne     .L_suspend_request_pending
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    mov     r0, rSELF
-    bl      MterpSuspendCheck           @ (self)
-    cmp     r0, #0
-    bne     MterpFallback
-    REFRESH_IBASE                       @ might have changed during suspend
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_no_count_backwards:
-    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
-    bne     .L_resume_backward_branch
-.L_osr_check:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    cmp     rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
-    beq     .L_check_osr_forward
-.L_resume_forward_branch:
-    add     r2, rINST, rINST            @ r2<- byte offset
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-.L_check_osr_forward:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    mov     r2, rSELF
-    bl      MterpAddHotnessBatch        @ (method, shadow_frame, self)
-    mov     rPROFILE, r0                @ restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    mov     r0, rSELF
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, #2
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
-    cmp     r0, #0
-    bne     MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    mov r0, rSELF
-    add r1, rFP, #OFF_FP_SHADOWFRAME
-    mov r2, rINST
-    bl MterpLogOSR
-#endif
-    mov r0, #1                          @ Signal normal return
-    b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  r0, rSELF
-    add  r1, rFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogFallback
-#endif
-MterpCommonFallback:
-    mov     r0, #0                                  @ signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    mov     r0, #1                                  @ signal return to caller.
-    b MterpDone
-MterpReturn:
-    ldr     r2, [rFP, #OFF_FP_RESULT_REGISTER]
-    str     r0, [r2]
-    str     r1, [r2, #4]
-    mov     r0, #1                                  @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmp     rPROFILE, #0
-    bgt     MterpProfileActive                      @ if > 0, we may have some counts to report.
-    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
-
-MterpProfileActive:
-    mov     rINST, r0                               @ stash return value
-    /* Report cached hotness counts */
-    ldr     r0, [rFP, #OFF_FP_METHOD]
-    add     r1, rFP, #OFF_FP_SHADOWFRAME
-    mov     r2, rSELF
-    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    bl      MterpAddHotnessBatch                    @ (method, shadow_frame, self)
-    mov     r0, rINST                               @ restore return value
-    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return
-
-    END ExecuteMterpImpl
-
-
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
deleted file mode 100644
index fd60c95..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ /dev/null
@@ -1,11662 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'arm64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: arm64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat xFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via xFP &
-  number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
-  r0     : w0 is 32-bit return register and x0 is 64-bit.
-  r0-r7  : Argument registers.
-  r8-r15 : Caller save registers (used as temporary registers).
-  r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
-           the linker, by the trampolines and other stubs (the backend uses
-           these as temporary registers).
-  r18    : Caller save register (used as temporary register).
-  r19    : Pointer to thread-local storage.
-  r20-r29: Callee save registers.
-  r30    : (lr) is reserved (the link register).
-  rsp    : (sp) is reserved (the stack pointer).
-  rzr    : (zr) is reserved (the zero register).
-
-  Floating-point registers
-  v0-v31
-
-  v0     : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
-           This is analogous to the C/C++ (hard-float) calling convention.
-  v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
-           Also used as temporary and codegen scratch registers.
-
-  v0-v7 and v16-v31 : trashed across C calls.
-  v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
-  v16-v31: Used as codegen temp/scratch.
-  v8-v15 : Can be used for promotion.
-
-  Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  x20  xPC       interpreted program counter, used for fetching instructions
-  x21  xFP       interpreted frame pointer, used for accessing locals and args
-  x22  xSELF     self (Thread) pointer
-  x23  xINST     first 16-bit code unit of current instruction
-  x24  xIBASE    interpreted instruction base pointer, used for computed goto
-  x25  xREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  x26  wPROFILE  jit profile hotness countdown
-  x16  ip        scratch reg
-  x17  ip2       scratch reg (used by macros)
-
-Macros are provided for common operations.  They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC      x20
-#define CFI_DEX  20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
-#define xFP      x21
-#define xSELF    x22
-#define xINST    x23
-#define wINST    w23
-#define xIBASE   x24
-#define xREFS    x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip       x16
-#define ip2      x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    str  xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-/*
- * Fetch the next instruction from xPC into wINST.  Does not advance xPC.
- */
-.macro FETCH_INST
-    ldrh    wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances xPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ldrh    wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
-    ldrh    \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC.  Used to load
- * xINST ahead of possible exception point.  Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
-    ldrh    wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
-  add  xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    add     xPC, xPC, \reg, sxtw
-    ldrh    wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
-    ldrh    \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
-    ldrsh   \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
-    ldrb     \reg, [xPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
-    and     \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg.  Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
-    add     \reg, xIBASE, \reg, lsl #7
-    br      \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
-    add     \reg, \base, \reg, lsl #7
-    br      \reg
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
-    ldr     \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
-    str     \reg, [xFP, \vreg, uxtw #2]
-    str     wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
-    str     \reg, [xFP, \vreg, uxtw #2]
-    str     \reg, [xREFS, \vreg, uxtw #2]
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
- */
-.macro GET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
-    ldr     \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
-    str     \reg, [ip2]
-    add     ip2, xREFS, \vreg, lsl #2
-    str     xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
-    ldrsw   \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
-    add     \reg, xFP, \vreg, lsl #2   /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-  ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
-    stp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_rel_offset \reg1, (\offset)
-    .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
-    ldp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_restore \reg1
-    .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
-    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
-    .cfi_adjust_cfa_offset (\frame_adjustment)
-    .cfi_rel_offset \reg1, 0
-    .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
-    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
-    .cfi_restore \reg1
-    .cfi_restore \reg2
-    .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
-    .type \name, #function
-    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-.endm
-
-.macro END name
-    .cfi_endproc
-    .size \name, .-\name
-.endm
-
-/* File: arm64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-    .text
-
-/*
- * Interpreter entry point.
- * On entry:
- *  x0  Thread* self/
- *  x1  insns_
- *  x2  ShadowFrame
- *  x3  JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
-    SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
-    SAVE_TWO_REGS                xIBASE, xREFS, 16
-    SAVE_TWO_REGS                xSELF, xINST, 32
-    SAVE_TWO_REGS                xPC, xFP, 48
-    SAVE_TWO_REGS                fp, lr, 64
-    add     fp, sp, #64
-
-    /* Remember the return register */
-    str     x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
-    /* Remember the dex instruction pointer */
-    str     x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
-    /* set up "named" registers */
-    mov     xSELF, x0
-    ldr     w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
-    add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET     // point to vregs.
-    add     xREFS, xFP, w0, lsl #2                 // point to reference array in shadow frame
-    ldr     w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET]   // Get starting dex_pc.
-    add     xPC, x1, w0, lsl #1                    // Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
-    /* Set up for backwards branches & osr profiling */
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xSELF
-    bl      MterpSetUpHotnessCountdown
-    mov     wPROFILE, w0                // Starting hotness countdown to xPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST                          // load wINST from rPC
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* NOTE: no fallthrough */
-
-/* File: arm64/instruction_start.S */
-
-    .type artMterpAsmInstructionStart, #object
-    .hidden artMterpAsmInstructionStart
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: arm64/op_nop.S */
-    FETCH_ADVANCE_INST 1                // advance to next instr, load rINST
-    GET_INST_OPCODE ip                  // ip<- opcode from rINST
-    GOTO_OPCODE ip                      // execute it
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: arm64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    lsr     w1, wINST, #12              // x1<- B from 15:12
-    ubfx    w0, wINST, #8, #4           // x0<- A from 11:8
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_VREG w2, w1                     // x2<- fp[B]
-    GET_INST_OPCODE ip                  // ip<- opcode from wINST
-    .if 0
-    SET_VREG_OBJECT w2, w0              // fp[A]<- x2
-    .else
-    SET_VREG w2, w0                     // fp[A]<- x2
-    .endif
-    GOTO_OPCODE ip                      // execute next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: arm64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH w1, 1                         // r1<- BBBB
-    lsr     w0, wINST, #8               // r0<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_VREG w2, w1                     // r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if 0
-    SET_VREG_OBJECT w2, w0              // fp[AA]<- r2
-    .else
-    SET_VREG w2, w0                     // fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: arm64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH w1, 2                         // w1<- BBBB
-    FETCH w0, 1                         // w0<- AAAA
-    FETCH_ADVANCE_INST 3                // advance xPC, load xINST
-    GET_VREG w2, w1                     // w2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from xINST
-    .if 0
-    SET_VREG_OBJECT w2, w0              // fp[AAAA]<- w2
-    .else
-    SET_VREG w2, w0                     // fp[AAAA]<- w2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: arm64/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE  x3, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE  x3, w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: arm64/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH w3, 1                         // w3<- BBBB
-    lsr     w2, wINST, #8               // w2<- AA
-    GET_VREG_WIDE x3, w3
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x3, w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: arm64/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    FETCH w3, 2                         // w3<- BBBB
-    FETCH w2, 1                         // w2<- AAAA
-    GET_VREG_WIDE x3, w3
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    SET_VREG_WIDE x3, w2
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: arm64/op_move_object.S */
-/* File: arm64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    lsr     w1, wINST, #12              // x1<- B from 15:12
-    ubfx    w0, wINST, #8, #4           // x0<- A from 11:8
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_VREG w2, w1                     // x2<- fp[B]
-    GET_INST_OPCODE ip                  // ip<- opcode from wINST
-    .if 1
-    SET_VREG_OBJECT w2, w0              // fp[A]<- x2
-    .else
-    SET_VREG w2, w0                     // fp[A]<- x2
-    .endif
-    GOTO_OPCODE ip                      // execute next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: arm64/op_move_object_from16.S */
-/* File: arm64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH w1, 1                         // r1<- BBBB
-    lsr     w0, wINST, #8               // r0<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_VREG w2, w1                     // r2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if 1
-    SET_VREG_OBJECT w2, w0              // fp[AA]<- r2
-    .else
-    SET_VREG w2, w0                     // fp[AA]<- r2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: arm64/op_move_object_16.S */
-/* File: arm64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH w1, 2                         // w1<- BBBB
-    FETCH w0, 1                         // w0<- AAAA
-    FETCH_ADVANCE_INST 3                // advance xPC, load xINST
-    GET_VREG w2, w1                     // w2<- fp[BBBB]
-    GET_INST_OPCODE ip                  // extract opcode from xINST
-    .if 1
-    SET_VREG_OBJECT w2, w0              // fp[AAAA]<- w2
-    .else
-    SET_VREG w2, w0                     // fp[AAAA]<- w2
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: arm64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    lsr     w2, wINST, #8               // r2<- AA
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
-    ldr     w0, [x0]                    // r0 <- result.i.
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if 0
-    SET_VREG_OBJECT w0, w2, w1          // fp[AA]<- r0
-    .else
-    SET_VREG w0, w2                     // fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: arm64/op_move_result_wide.S */
-    /* for: move-result-wide */
-    /* op vAA */
-    lsr     w2, wINST, #8               // r2<- AA
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
-    ldr     x0, [x0]                    // r0 <- result.i.
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, x2                // fp[AA]<- r0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: arm64/op_move_result_object.S */
-/* File: arm64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    lsr     w2, wINST, #8               // r2<- AA
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
-    ldr     w0, [x0]                    // r0 <- result.i.
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    .if 1
-    SET_VREG_OBJECT w0, w2, w1          // fp[AA]<- r0
-    .else
-    SET_VREG w0, w2                     // fp[AA]<- r0
-    .endif
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: arm64/op_move_exception.S */
-    /* move-exception vAA */
-    lsr     w2, wINST, #8               // w2<- AA
-    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    mov     x1, #0                      // w1<- 0
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    SET_VREG_OBJECT w3, w2              // fp[AA]<- exception obj
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    str     x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // clear exception
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: arm64/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .Lop_return_void_check
-.Lop_return_void_return:
-    mov     x0, #0
-    b       MterpReturn
-.Lop_return_void_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .Lop_return_void_return
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: arm64/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .Lop_return_check
-.Lop_return_return:
-    lsr     w2, wINST, #8               // r2<- AA
-    GET_VREG w0, w2                     // r0<- vAA
-    b       MterpReturn
-.Lop_return_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .Lop_return_return
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: arm64/op_return_wide.S */
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .Lop_return_wide_check
-.Lop_return_wide_return:
-    lsr     w2, wINST, #8               // w2<- AA
-    GET_VREG_WIDE x0, w2                // x0<- vAA
-    b       MterpReturn
-.Lop_return_wide_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .Lop_return_wide_return
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: arm64/op_return_object.S */
-/* File: arm64/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    bl      MterpThreadFenceForConstructor
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .Lop_return_object_check
-.Lop_return_object_return:
-    lsr     w2, wINST, #8               // r2<- AA
-    GET_VREG w0, w2                     // r0<- vAA
-    b       MterpReturn
-.Lop_return_object_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .Lop_return_object_return
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: arm64/op_const_4.S */
-    /* const/4 vA, #+B */
-    sbfx    w1, wINST, #12, #4          // w1<- sssssssB
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    FETCH_ADVANCE_INST 1                // advance xPC, load wINST
-    GET_INST_OPCODE ip                  // ip<- opcode from xINST
-    SET_VREG w1, w0                     // fp[A]<- w1
-    GOTO_OPCODE ip                      // execute next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: arm64/op_const_16.S */
-    /* const/16 vAA, #+BBBB */
-    FETCH_S w0, 1                       // w0<- ssssBBBB (sign-extended)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_ADVANCE_INST 2                // advance xPC, load wINST
-    SET_VREG w0, w3                     // vAA<- w0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: arm64/op_const.S */
-    /* const vAA, #+BBBBbbbb */
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH w0, 1                         // w0<- bbbb (low
-    FETCH w1, 2                         // w1<- BBBB (high
-    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
-    orr     w0, w0, w1, lsl #16         // w0<- BBBBbbbb
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG w0, w3                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: arm64/op_const_high16.S */
-    /* const/high16 vAA, #+BBBB0000 */
-    FETCH   w0, 1                       // r0<- 0000BBBB (zero-extended)
-    lsr     w3, wINST, #8               // r3<- AA
-    lsl     w0, w0, #16                 // r0<- BBBB0000
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    SET_VREG w0, w3                     // vAA<- r0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: arm64/op_const_wide_16.S */
-    /* const-wide/16 vAA, #+BBBB */
-    FETCH_S x0, 1                       // x0<- ssssssssssssBBBB (sign-extended)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: arm64/op_const_wide_32.S */
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (low)
-    lsr     w3, wINST, #8               // w3<- AA
-    FETCH_S x2, 2                       // x2<- ssssssssssssBBBB (high)
-    FETCH_ADVANCE_INST 3                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    orr     x0, x0, x2, lsl #16         // x0<- ssssssssBBBBbbbb
-    SET_VREG_WIDE x0, w3
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: arm64/op_const_wide.S */
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    FETCH w0, 1                         // w0<- bbbb (low)
-    FETCH w1, 2                         // w1<- BBBB (low middle)
-    FETCH w2, 3                         // w2<- hhhh (high middle)
-    FETCH w3, 4                         // w3<- HHHH (high)
-    lsr     w4, wINST, #8               // r4<- AA
-    FETCH_ADVANCE_INST 5                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    orr     w0, w0, w1, lsl #16         // w0<-         BBBBbbbb
-    orr     x0, x0, x2, lsl #32         // w0<-     hhhhBBBBbbbb
-    orr     x0, x0, x3, lsl #48         // w0<- HHHHhhhhBBBBbbbb
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: arm64/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    FETCH w0, 1                         // w0<- 0000BBBB (zero-extended)
-    lsr     w1, wINST, #8               // w1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    lsl     x0, x0, #48
-    SET_VREG_WIDE x0, w1
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: arm64/op_const_string.S */
-/* File: arm64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- BBBB
-    lsr     w1, wINST, #8               // w1<- AA
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstString                     // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     // load rINST
-    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: arm64/op_const_string_jumbo.S */
-    /* const/string vAA, String//BBBBBBBB */
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- bbbb (low
-    FETCH w2, 2                         // w2<- BBBB (high
-    lsr     w1, wINST, #8               // w1<- AA
-    orr     w0, w0, w2, lsl #16         // w1<- BBBBbbbb
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstString            // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     // advance rPC
-    cbnz    w0, MterpPossibleException      // let reference interpreter deal with it.
-    ADVANCE 3                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: arm64/op_const_class.S */
-/* File: arm64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- BBBB
-    lsr     w1, wINST, #8               // w1<- AA
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstClass                     // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     // load rINST
-    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: arm64/op_monitor_enter.S */
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8               // w2<- AA
-    GET_VREG w0, w2                      // w0<- vAA (object)
-    mov      x1, xSELF                   // w1<- self
-    bl       artLockObjectFromCode
-    cbnz     w0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE ip                   // extract opcode from rINST
-    GOTO_OPCODE ip                       // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: arm64/op_monitor_exit.S */
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8              // w2<- AA
-    GET_VREG w0, w2                     // w0<- vAA (object)
-    mov      x1, xSELF                  // w0<- self
-    bl       artUnlockObjectFromCode    // w0<- success for unlock(self, obj)
-    cbnz     w0, MterpException
-    FETCH_ADVANCE_INST 1                // before throw: advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: arm64/op_check_cast.S */
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    EXPORT_PC
-    FETCH    w0, 1                      // w0<- BBBB
-    lsr      w1, wINST, #8              // w1<- AA
-    VREG_INDEX_TO_ADDR x1, w1           // w1<- &object
-    ldr      x2, [xFP, #OFF_FP_METHOD]  // w2<- method
-    mov      x3, xSELF                  // w3<- self
-    bl       MterpCheckCast             // (index, &obj, method, self)
-    PREFETCH_INST 2
-    cbnz     w0, MterpPossibleException
-    ADVANCE  2
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: arm64/op_instance_of.S */
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    EXPORT_PC
-    FETCH     w0, 1                     // w0<- CCCC
-    lsr       w1, wINST, #12            // w1<- B
-    VREG_INDEX_TO_ADDR x1, w1           // w1<- &object
-    ldr       x2, [xFP, #OFF_FP_METHOD] // w2<- method
-    mov       x3, xSELF                 // w3<- self
-    bl        MterpInstanceOf           // (index, &obj, method, self)
-    ldr       x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx      w2, wINST, #8, #4         // w2<- A
-    PREFETCH_INST 2
-    cbnz      x1, MterpException
-    ADVANCE 2                           // advance rPC
-    SET_VREG w0, w2                     // vA<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: arm64/op_array_length.S */
-    /*
-     * Return the length of an array.
-     */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w0, w1                     // w0<- vB (object ref)
-    cbz     w0, common_errNullObject    // yup, fail
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- array length
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w3, w2                     // vB<- length
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: arm64/op_new_instance.S */
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xSELF
-    mov     w2, wINST
-    bl      MterpNewInstance           // (shadow_frame, self, inst_data)
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2               // advance rPC, load rINST
-    GET_INST_OPCODE ip                 // extract opcode from rINST
-    GOTO_OPCODE ip                     // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: arm64/op_new_array.S */
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    mov     x3, xSELF
-    bl      MterpNewArray
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: arm64/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     x2, xSELF
-    bl      MterpFilledNewArray
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: arm64/op_filled_new_array_range.S */
-/* File: arm64/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     x2, xSELF
-    bl      MterpFilledNewArrayRange
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: arm64/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
-    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
-    lsr     w3, wINST, #8               // w3<- AA
-    orr     x1, x0, x1, lsl #16         // x1<- ssssssssBBBBbbbb
-    GET_VREG w0, w3                     // w0<- vAA (array object)
-    add     x1, xPC, x1, lsl #1         // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
-    bl      MterpFillArrayData          // (obj, payload)
-    cbz     w0, MterpPossibleException      // exception?
-    FETCH_ADVANCE_INST 3                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: arm64/op_throw.S */
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    lsr      w2, wINST, #8               // r2<- AA
-    GET_VREG w1, w2                      // r1<- vAA (exception object)
-    cbz      w1, common_errNullObject
-    str      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]  // thread->exception<- obj
-    b        MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: arm64/op_goto.S */
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sbfx    wINST, wINST, #8, #8           // wINST<- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: arm64/op_goto_16.S */
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S wINST, 1                    // wINST<- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: arm64/op_goto_32.S */
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".  Because
-     * we need the V bit set, we'll use an adds to convert from Dalvik
-     * offset to byte offset.
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH w0, 1                         // w0<- aaaa (lo)
-    FETCH w1, 2                         // w1<- AAAA (hi)
-    orr     wINST, w0, w1, lsl #16      // wINST<- AAAAaaaa
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: arm64/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
-    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
-    lsr     w3, wINST, #8               // w3<- AA
-    orr     x0, x0, x1, lsl #16         // x0<- ssssssssBBBBbbbb
-    GET_VREG w1, w3                     // w1<- vAA
-    add     x0, xPC, x0, lsl #1         // x0<- PC + ssssssssBBBBbbbb*2
-    bl      MterpDoPackedSwitch                       // w0<- code-unit branch offset
-    sxtw    xINST, w0
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: arm64/op_sparse_switch.S */
-/* File: arm64/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
-    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
-    lsr     w3, wINST, #8               // w3<- AA
-    orr     x0, x0, x1, lsl #16         // x0<- ssssssssBBBBbbbb
-    GET_VREG w1, w3                     // w1<- vAA
-    add     x0, xPC, x0, lsl #1         // x0<- PC + ssssssssBBBBbbbb*2
-    bl      MterpDoSparseSwitch                       // w0<- code-unit branch offset
-    sxtw    xINST, w0
-    b       MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: arm64/op_cmpl_float.S */
-/* File: arm64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG s1, w2
-    GET_VREG s2, w3
-    fcmp s1, s2
-    cset w0, ne
-    cneg w0, w0, lt
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w4                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: arm64/op_cmpg_float.S */
-/* File: arm64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG s1, w2
-    GET_VREG s2, w3
-    fcmp s1, s2
-    cset w0, ne
-    cneg w0, w0, cc
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w4                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: arm64/op_cmpl_double.S */
-/* File: arm64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG_WIDE d1, w2
-    GET_VREG_WIDE d2, w3
-    fcmp d1, d2
-    cset w0, ne
-    cneg w0, w0, lt
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w4                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: arm64/op_cmpg_double.S */
-/* File: arm64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG_WIDE d1, w2
-    GET_VREG_WIDE d2, w3
-    fcmp d1, d2
-    cset w0, ne
-    cneg w0, w0, cc
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w4                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: arm64/op_cmp_long.S */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG_WIDE x1, w2
-    GET_VREG_WIDE x2, w3
-    cmp     x1, x2
-    cset    w0, ne
-    cneg    w0, w0, lt
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    SET_VREG w0, w4
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: arm64/op_if_eq.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.eq MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: arm64/op_if_ne.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.ne MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: arm64/op_if_lt.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.lt MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: arm64/op_if_ge.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.ge MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: arm64/op_if_gt.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.gt MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: arm64/op_if_le.S */
-/* File: arm64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.le MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: arm64/op_if_eqz.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 0
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    cbz     w2, MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: arm64/op_if_nez.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 0
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    cbnz    w2, MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: arm64/op_if_ltz.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 0
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    tbnz    w2, #31, MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: arm64/op_if_gez.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 0
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    tbz     w2, #31, MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: arm64/op_if_gtz.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 1
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    b.gt MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: arm64/op_if_lez.S */
-/* File: arm64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    .if 1
-    cmp     w2, #0                      // compare (vA, 0)
-    .endif
-    b.le MterpCommonTakenBranchNoFlags
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: arm64/op_unused_3e.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: arm64/op_unused_3f.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: arm64/op_unused_40.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: arm64/op_unused_41.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: arm64/op_unused_42.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: arm64/op_unused_43.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: arm64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #2    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    ldr   w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: arm64/op_aget_wide.S */
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject        // yes, bail
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    ldr     x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  // x2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x2, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: arm64/op_aget_object.S */
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    FETCH_B w3, 1, 1                    // w3<- CC
-    EXPORT_PC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    bl       artAGetObjectFromMterp     // (array, index)
-    ldr      x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    lsr      w2, wINST, #8               // w9<- AA
-    PREFETCH_INST 2
-    cbnz     w1, MterpException
-    SET_VREG_OBJECT w0, w2
-    ADVANCE 2
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: arm64/op_aget_boolean.S */
-/* File: arm64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #0    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    ldrb   w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: arm64/op_aget_byte.S */
-/* File: arm64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #0    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    ldrsb   w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: arm64/op_aget_char.S */
-/* File: arm64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #1    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    ldrh   w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: arm64/op_aget_short.S */
-/* File: arm64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     x0, common_errNullObject    // bail if null array object.
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, uxtw #1    // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    ldrsh   w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET]     // w2<- vBB[vCC]
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w2, w9                     // vAA<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: arm64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #2     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    str  w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: arm64/op_aput_wide.S */
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    and     w2, w0, #255                // w2<- BB
-    lsr     w3, w0, #8                  // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    GET_VREG_WIDE x1, w4
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    str     x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: arm64/op_aput_object.S */
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    bl      MterpAputObject
-    cbz     w0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: arm64/op_aput_boolean.S */
-/* File: arm64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #0     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    strb  w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: arm64/op_aput_byte.S */
-/* File: arm64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #0     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    strb  w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: arm64/op_aput_char.S */
-/* File: arm64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #1     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    strh  w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: arm64/op_aput_short.S */
-/* File: arm64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B w2, 1, 0                    // w2<- BB
-    lsr     w9, wINST, #8               // w9<- AA
-    FETCH_B w3, 1, 1                    // w3<- CC
-    GET_VREG w0, w2                     // w0<- vBB (array object)
-    GET_VREG w1, w3                     // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null
-    ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #1     // w0<- arrayObj + index*width
-    cmp     w1, w3                      // compare unsigned index, length
-    bcs     common_errArrayIndex        // index >= length, bail
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_VREG w2, w9                     // w2<- vAA
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    strh  w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET]     // vBB[vCC]<- w2
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU32
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetU32
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: arm64/op_iget_wide.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU64
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetU64
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: arm64/op_iget_object.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetObj
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetObj
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: arm64/op_iget_boolean.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetU8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: arm64/op_iget_byte.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetI8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: arm64/op_iget_char.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetU16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: arm64/op_iget_short.S */
-/* File: arm64/op_iget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIGetI16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU32
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutU32
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: arm64/op_iput_wide.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU64
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutU64
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: arm64/op_iput_object.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutObj
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutObj
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: arm64/op_iput_boolean.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutU8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: arm64/op_iput_byte.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutI8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: arm64/op_iput_char.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutU16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: arm64/op_iput_short.S */
-/* File: arm64/op_iput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpIPutI16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU32
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetU32
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: arm64/op_sget_wide.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU64
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetU64
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: arm64/op_sget_object.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetObj
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetObj
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: arm64/op_sget_boolean.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetU8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: arm64/op_sget_byte.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetI8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: arm64/op_sget_char.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetU16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: arm64/op_sget_short.S */
-/* File: arm64/op_sget.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSGetI16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU32
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutU32
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: arm64/op_sput_wide.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU64
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutU64
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: arm64/op_sput_object.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutObj
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutObj
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: arm64/op_sput_boolean.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutU8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: arm64/op_sput_byte.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI8
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutI8
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: arm64/op_sput_char.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutU16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: arm64/op_sput_short.S */
-/* File: arm64/op_sput.S */
-/* File: arm64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI16
-    mov      x0, xPC                       // arg0: Instruction* inst
-    mov      x1, xINST                     // arg1: uint16_t inst_data
-    add      x2, xFP, #OFF_FP_SHADOWFRAME  // arg2: ShadowFrame* sf
-    mov      x3, xSELF                     // arg3: Thread* self
-    PREFETCH_INST 2                        // prefetch next opcode
-    bl       MterpSPutI16
-    cbz      x0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE ip                     // extract opcode from rINST
-    GOTO_OPCODE ip                         // jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: arm64/op_invoke_virtual.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeVirtual
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: arm64/op_invoke_super.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeSuper
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: arm64/op_invoke_direct.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeDirect
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: arm64/op_invoke_static.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeStatic
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: arm64/op_invoke_interface.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeInterface
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: arm64/op_return_void_no_barrier.S */
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    mov     x0, xSELF
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .Lop_return_void_no_barrier_check
-.Lop_return_void_no_barrier_return:
-    mov     x0, #0
-    b       MterpReturn
-.Lop_return_void_no_barrier_check:
-    bl      MterpSuspendCheck           // (self)
-    b       .Lop_return_void_no_barrier_return
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: arm64/op_invoke_virtual_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeVirtualRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: arm64/op_invoke_super_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeSuperRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: arm64/op_invoke_direct_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeDirectRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: arm64/op_invoke_static_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeStaticRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: arm64/op_invoke_interface_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeInterfaceRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: arm64/op_unused_79.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: arm64/op_unused_7a.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: arm64/op_neg_int.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    sub     w0, wzr, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: arm64/op_not_int.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    mvn     w0, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: arm64/op_neg_long.S */
-/* File: arm64/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op x0".
-     *
-     * For: neg-long, not-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    sub x0, xzr, x0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: arm64/op_not_long.S */
-/* File: arm64/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op x0".
-     *
-     * For: neg-long, not-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    mvn     x0, x0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: arm64/op_neg_float.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    eor     w0, w0, #0x80000000                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: arm64/op_neg_double.S */
-/* File: arm64/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op x0".
-     *
-     * For: neg-long, not-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    eor     x0, x0, #0x8000000000000000
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-11 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: arm64/op_int_to_long.S */
-    /* int-to-long vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_S x0, w3                   // x0<- sign_extend(fp[B])
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4                // fp[A]<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: arm64/op_int_to_float.S */
-/* File: arm64/funopNarrow.S */
-    /*
-     * Generic 32bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op w0".
-     *
-     * For: int-to-float, float-to-int
-     * TODO: refactor all of the conversions - parameterize width and use same template.
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG w0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    scvtf s0, w0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG s0, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: arm64/op_int_to_double.S */
-/* File: arm64/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op w0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG w0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    scvtf d0, w0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE d0, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: arm64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: arm64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    lsr     w1, wINST, #12              // x1<- B from 15:12
-    ubfx    w0, wINST, #8, #4           // x0<- A from 11:8
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    GET_VREG w2, w1                     // x2<- fp[B]
-    GET_INST_OPCODE ip                  // ip<- opcode from wINST
-    .if 0
-    SET_VREG_OBJECT w2, w0              // fp[A]<- x2
-    .else
-    SET_VREG w2, w0                     // fp[A]<- x2
-    .endif
-    GOTO_OPCODE ip                      // execute next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: arm64/op_long_to_float.S */
-/* File: arm64/funopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op x0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    scvtf s0, x0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG s0, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: arm64/op_long_to_double.S */
-/* File: arm64/funopWide.S */
-    /*
-     * Generic 64bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op x0".
-     *
-     * For: long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE x0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    scvtf d0, x0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE d0, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: arm64/op_float_to_int.S */
-/* File: arm64/funopNarrow.S */
-    /*
-     * Generic 32bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "w0 = op s0".
-     *
-     * For: int-to-float, float-to-int
-     * TODO: refactor all of the conversions - parameterize width and use same template.
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG s0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvtzs w0, s0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG w0, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: arm64/op_float_to_long.S */
-/* File: arm64/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "x0 = op s0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG s0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvtzs x0, s0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: arm64/op_float_to_double.S */
-/* File: arm64/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "d0 = op s0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG s0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvt  d0, s0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE d0, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: arm64/op_double_to_int.S */
-/* File: arm64/funopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "w0 = op d0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE d0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvtzs w0, d0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG w0, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: arm64/op_double_to_long.S */
-/* File: arm64/funopWide.S */
-    /*
-     * Generic 64bit-to-64bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "x0 = op d0".
-     *
-     * For: long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE d0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvtzs x0, d0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, w4           // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: arm64/op_double_to_float.S */
-/* File: arm64/funopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit floating point unary operation.  Provide an
-     * "instr" line that specifies an instruction that performs "s0 = op d0".
-     *
-     * For: int-to-double, float-to-double, float-to-long
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w4, wINST, #8, #4           // w4<- A
-    GET_VREG_WIDE d0, w3
-    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
-    fcvt s0, d0                              // d0<- op
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG s0, w4                // vA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: arm64/op_int_to_byte.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    sxtb    w0, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: arm64/op_int_to_char.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    uxth    w0, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: arm64/op_int_to_short.S */
-/* File: arm64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op w0".
-     * This could be an ARM instruction or a function call.
-     *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
-     */
-    /* unop vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    GET_VREG w0, w3                     // w0<- vB
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    sxth    w0, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                     // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 8-9 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: arm64/op_add_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    add     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: arm64/op_sub_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sub     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: arm64/op_mul_int.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    mul     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: arm64/op_div_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 1
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sdiv     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: arm64/op_rem_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 1
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    sdiv     w2, w0, w1                           // optional op; may set condition codes
-    msub w0, w2, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: arm64/op_and_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    and     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: arm64/op_or_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    orr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: arm64/op_xor_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    eor     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: arm64/op_shl_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsl     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: arm64/op_shr_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    asr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: arm64/op_ushr_int.S */
-/* File: arm64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
-     * handles it correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
-     *      mul-float, div-float, rem-float
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w9, wINST, #8               // w9<- AA
-    lsr     w3, w0, #8                  // w3<- CC
-    and     w2, w0, #255                // w2<- BB
-    GET_VREG w1, w3                     // w1<- vCC
-    GET_VREG w0, w2                     // w0<- vBB
-    .if 0
-    cbz     w1, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: arm64/op_add_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    add x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: arm64/op_sub_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    sub x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: arm64/op_mul_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    mul x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: arm64/op_div_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 1
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    sdiv x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: arm64/op_rem_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 1
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    sdiv x3, x1, x2
-    msub x0, x3, x2, x1                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: arm64/op_and_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    and x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: arm64/op_or_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    orr x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: arm64/op_xor_long.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x2, w2               // w2<- vCC
-    GET_VREG_WIDE x1, w1               // w1<- vBB
-    .if 0
-    cbz     x2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    eor x0, x1, x2                              // x0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w4           // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: arm64/op_shl_long.S */
-/* File: arm64/shiftWide.S */
-    /*
-     * 64-bit shift operation.
-     *
-     * For: shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr      w3, wINST, #8               // w3<- AA
-    lsr      w2, w0, #8                  // w2<- CC
-    GET_VREG w2, w2                     // w2<- vCC (shift count)
-    and      w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x1, w1                // x1<- vBB
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    lsl  x0, x1, x2                 // Do the shift. Only low 6 bits of x2 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3                // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: arm64/op_shr_long.S */
-/* File: arm64/shiftWide.S */
-    /*
-     * 64-bit shift operation.
-     *
-     * For: shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr      w3, wINST, #8               // w3<- AA
-    lsr      w2, w0, #8                  // w2<- CC
-    GET_VREG w2, w2                     // w2<- vCC (shift count)
-    and      w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x1, w1                // x1<- vBB
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    asr  x0, x1, x2                 // Do the shift. Only low 6 bits of x2 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3                // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: arm64/op_ushr_long.S */
-/* File: arm64/shiftWide.S */
-    /*
-     * 64-bit shift operation.
-     *
-     * For: shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr      w3, wINST, #8               // w3<- AA
-    lsr      w2, w0, #8                  // w2<- CC
-    GET_VREG w2, w2                     // w2<- vCC (shift count)
-    and      w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE x1, w1                // x1<- vBB
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    lsr  x0, x1, x2                 // Do the shift. Only low 6 bits of x2 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w3                // vAA<- x0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: arm64/op_add_float.S */
-/* File: arm64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    fadd   s0, s0, s1                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: arm64/op_sub_float.S */
-/* File: arm64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    fsub   s0, s0, s1                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: arm64/op_mul_float.S */
-/* File: arm64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    fmul   s0, s0, s1                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: arm64/op_div_float.S */
-/* File: arm64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    fdiv   s0, s0, s1                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: arm64/op_rem_float.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float
-     * form: <op> s0, s0, s1
-     */
-    /* floatop vAA, vBB, vCC */
-    FETCH w0, 1                         // r0<- CCBB
-    lsr     w1, w0, #8                  // r2<- CC
-    and     w0, w0, #255                // r1<- BB
-    GET_VREG  s1, w1
-    GET_VREG  s0, w0
-    bl      fmodf                              // s0<- op
-    lsr     w1, wINST, #8               // r1<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG  s0, w1
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: arm64/op_add_double.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d2, w2               // w2<- vCC
-    GET_VREG_WIDE d1, w1               // w1<- vBB
-    .if 0
-    cbz     d2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    fadd d0, d1, d2                              // d0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4           // vAA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: arm64/op_sub_double.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d2, w2               // w2<- vCC
-    GET_VREG_WIDE d1, w1               // w1<- vBB
-    .if 0
-    cbz     d2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    fsub d0, d1, d2                              // d0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4           // vAA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: arm64/op_mul_double.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d2, w2               // w2<- vCC
-    GET_VREG_WIDE d1, w1               // w1<- vBB
-    .if 0
-    cbz     d2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    fmul d0, d1, d2                              // d0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4           // vAA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: arm64/op_div_double.S */
-/* File: arm64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = x1 op x2".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than x0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, add-double, sub-double, mul-double, div-double, rem-double
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w4, wINST, #8               // w4<- AA
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d2, w2               // w2<- vCC
-    GET_VREG_WIDE d1, w1               // w1<- vBB
-    .if 0
-    cbz     d2, common_errDivideByZero  // is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    fdiv d0, d1, d2                              // d0<- op, w0-w4 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4           // vAA<- d0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: arm64/op_rem_double.S */
-    /* rem vAA, vBB, vCC */
-    FETCH w0, 1                         // w0<- CCBB
-    lsr     w2, w0, #8                  // w2<- CC
-    and     w1, w0, #255                // w1<- BB
-    GET_VREG_WIDE d1, w2                // d1<- vCC
-    GET_VREG_WIDE d0, w1                // d0<- vBB
-    bl  fmod
-    lsr     w4, wINST, #8               // w4<- AA
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w4                // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 11-14 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: arm64/op_add_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    add     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: arm64/op_sub_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sub     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: arm64/op_mul_int_2addr.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    mul     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: arm64/op_div_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sdiv     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: arm64/op_rem_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    sdiv     w2, w0, w1                           // optional op; may set condition codes
-    msub w0, w2, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: arm64/op_and_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    and     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: arm64/op_or_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    orr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: arm64/op_xor_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    eor     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: arm64/op_shl_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsl     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: arm64/op_shr_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    asr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: arm64/op_ushr_int_2addr.S */
-/* File: arm64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
-     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w1, w3                     // w1<- vB
-    GET_VREG w0, w9                     // w0<- vA
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: arm64/op_add_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    add     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: arm64/op_sub_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    sub     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: arm64/op_mul_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    mul     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: arm64/op_div_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 1
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    sdiv     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: arm64/op_rem_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 1
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    sdiv x3, x0, x1
-    msub x0, x3, x1, x0                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: arm64/op_and_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    and     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: arm64/op_or_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    orr     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: arm64/op_xor_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE x1, w1               // x1<- vB
-    GET_VREG_WIDE x0, w2               // x0<- vA
-    .if 0
-    cbz     x1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    eor     x0, x0, x1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: arm64/op_shl_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
-    /*
-     * Generic 64-bit shift operation.
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w1, w1                     // x1<- vB
-    GET_VREG_WIDE x0, w2                // x0<- vA
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    lsl x0, x0, x1                  // Do the shift. Only low 6 bits of x1 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: arm64/op_shr_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
-    /*
-     * Generic 64-bit shift operation.
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w1, w1                     // x1<- vB
-    GET_VREG_WIDE x0, w2                // x0<- vA
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    asr x0, x0, x1                  // Do the shift. Only low 6 bits of x1 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm64/op_ushr_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
-    /*
-     * Generic 64-bit shift operation.
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG w1, w1                     // x1<- vB
-    GET_VREG_WIDE x0, w2                // x0<- vA
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    lsr x0, x0, x1                  // Do the shift. Only low 6 bits of x1 are used.
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE x0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: arm64/op_add_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    fadd   s2, s0, s1                              // s2<- op
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s2, w9
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: arm64/op_sub_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    fsub   s2, s0, s1                              // s2<- op
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s2, w9
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: arm64/op_mul_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    fmul   s2, s0, s1                              // s2<- op
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s2, w9
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: arm64/op_div_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
-    /*
-     * Generic 32-bit floating point "/2addr" binary operation.  Provide
-     * an "instr" line that specifies an instruction that performs
-     * "s2 = s0 op s1".
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    fdiv   s2, s0, s1                              // s2<- op
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s2, w9
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: arm64/op_rem_float_2addr.S */
-    /* rem vA, vB */
-    lsr     w3, wINST, #12              // w3<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG s1, w3
-    GET_VREG s0, w9
-    bl  fmodf
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG s0, w9
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: arm64/op_add_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1               // x1<- vB
-    GET_VREG_WIDE d0, w2               // x0<- vA
-    .if 0
-    cbz     d1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    fadd     d0, d0, d1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: arm64/op_sub_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1               // x1<- vB
-    GET_VREG_WIDE d0, w2               // x0<- vA
-    .if 0
-    cbz     d1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    fsub     d0, d0, d1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: arm64/op_mul_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1               // x1<- vB
-    GET_VREG_WIDE d0, w2               // x0<- vA
-    .if 0
-    cbz     d1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    fmul     d0, d0, d1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: arm64/op_div_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "x0 = x0 op x1".
-     * This must not be a function call, as we keep w2 live across it.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
-     *      sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1               // x1<- vB
-    GET_VREG_WIDE d0, w2               // x0<- vA
-    .if 0
-    cbz     d1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    
-    fdiv     d0, d0, d1                              // result<- op
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2               // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: arm64/op_rem_double_2addr.S */
-    /* rem vA, vB */
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    GET_VREG_WIDE d1, w1                // d1<- vB
-    GET_VREG_WIDE d0, w2                // d0<- vA
-    bl fmod
-    ubfx    w2, wINST, #8, #4           // w2<- A (need to reload - killed across call)
-    FETCH_ADVANCE_INST 1                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG_WIDE d0, w2                // vAA<- result
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: arm64/op_add_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    add     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: arm64/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    sub     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: arm64/op_mul_int_lit16.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    mul     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: arm64/op_div_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    sdiv w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: arm64/op_rem_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    sdiv w3, w0, w1
-    msub w0, w3, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: arm64/op_and_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    and     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: arm64/op_or_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    orr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: arm64/op_xor_int_lit16.S */
-/* File: arm64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    FETCH_S w1, 1                       // w1<- ssssCCCC (sign-extended)
-    lsr     w2, wINST, #12              // w2<- B
-    ubfx    w9, wINST, #8, #4           // w9<- A
-    GET_VREG w0, w2                     // w0<- vB
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    eor     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-13 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: arm64/op_add_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-                                // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    add     w0, w0, w3, asr #8                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm64/op_rsub_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    asr     w1, w3, #8                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sub     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: arm64/op_mul_int_lit8.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    asr     w1, w3, #8                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    mul     w0, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: arm64/op_div_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    asr     w1, w3, #8                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    sdiv     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: arm64/op_rem_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    asr     w1, w3, #8                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 1
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    sdiv w3, w0, w1                           // optional op; may set condition codes
-    msub w0, w3, w1, w0                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: arm64/op_and_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-                                // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    and     w0, w0, w3, asr #8                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: arm64/op_or_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-                                // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    orr     w0, w0, w3, asr #8                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: arm64/op_xor_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-                                // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    eor     w0, w0, w3, asr #8                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: arm64/op_shl_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    ubfx    w1, w3, #8, #5                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsl     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: arm64/op_shr_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    ubfx    w1, w3, #8, #5                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    asr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm64/op_ushr_int_lit8.S */
-/* File: arm64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = w0 op w1".
-     * This could be an ARM instruction or a function call.  (If the result
-     * comes back in a register other than w0, you can override "result".)
-     *
-     * You can override "extract" if the extraction of the literal value
-     * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
-     * can be omitted completely if the shift is embedded in "instr".
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (w1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    FETCH_S w3, 1                       // w3<- ssssCCBB (sign-extended for CC)
-    lsr     w9, wINST, #8               // w9<- AA
-    and     w2, w3, #255                // w2<- BB
-    GET_VREG w0, w2                     // w0<- vBB
-    ubfx    w1, w3, #8, #5                            // optional; typically w1<- ssssssCC (sign extended)
-    .if 0
-    cbz     w1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-                               // optional op; may set condition codes
-    lsr     w0, w0, w1                              // w0<- op, w0-w3 changed
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    SET_VREG w0, w9                // vAA<- w0
-    GOTO_OPCODE ip                      // jump to next instruction
-    /* 10-12 instructions */
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: arm64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldr   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: arm64/op_iget_wide_quick.S */
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w4, 1                         // w4<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldr     x0, [x3, x4]                // x0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    SET_VREG_WIDE x0, w2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: arm64/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    EXPORT_PC
-    GET_VREG w0, w2                     // w0<- object we're operating on
-    bl      artIGetObjectFromMterp      // (obj, offset)
-    ldr     x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    PREFETCH_INST 2
-    cbnz    w3, MterpPossibleException      // bail out
-    SET_VREG_OBJECT w0, w2              // fp[A]<- w0
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: arm64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    str     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: arm64/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w3, 1                         // w3<- field byte offset
-    GET_VREG w2, w2                     // w2<- fp[B], the object pointer
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    cbz     w2, common_errNullObject    // object was null
-    GET_VREG_WIDE x0, w0                // x0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load wINST
-    str     x0, [x2, x3]                // obj.field<- x0
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: arm64/op_iput_object_quick.S */
-    EXPORT_PC
-    add     x0, xFP, #OFF_FP_SHADOWFRAME
-    mov     x1, xPC
-    mov     w2, wINST
-    bl      MterpIputObjectQuick
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm64/op_invoke_virtual_quick.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeVirtualQuick
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm64/op_invoke_virtual_range_quick.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeVirtualQuickRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: arm64/op_iput_boolean_quick.S */
-/* File: arm64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    strb     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: arm64/op_iput_byte_quick.S */
-/* File: arm64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    strb     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: arm64/op_iput_char_quick.S */
-/* File: arm64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    strh     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: arm64/op_iput_short_quick.S */
-/* File: arm64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- fp[B], the object pointer
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    GET_VREG w0, w2                     // w0<- fp[A]
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    strh     w0, [x3, x1]             // obj.field<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: arm64/op_iget_boolean_quick.S */
-/* File: arm64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldrb   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: arm64/op_iget_byte_quick.S */
-/* File: arm64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldrsb   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: arm64/op_iget_char_quick.S */
-/* File: arm64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldrh   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: arm64/op_iget_short_quick.S */
-/* File: arm64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    lsr     w2, wINST, #12              // w2<- B
-    FETCH w1, 1                         // w1<- field byte offset
-    GET_VREG w3, w2                     // w3<- object we're operating on
-    ubfx    w2, wINST, #8, #4           // w2<- A
-    cbz     w3, common_errNullObject    // object was null
-    ldrsh   w0, [x3, x1]                // w0<- obj.field
-    FETCH_ADVANCE_INST 2                // advance rPC, load rINST
-    
-    SET_VREG w0, w2                     // fp[A]<- w0
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: arm64/op_unused_f3.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: arm64/op_unused_f4.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: arm64/op_unused_f5.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: arm64/op_unused_f6.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: arm64/op_unused_f7.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: arm64/op_unused_f8.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: arm64/op_unused_f9.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: arm64/op_invoke_polymorphic.S */
-/* File: arm64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokePolymorphic
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm64/op_invoke_polymorphic_range.S */
-/* File: arm64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokePolymorphicRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: arm64/op_invoke_custom.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeCustom
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: arm64/op_invoke_custom_range.S */
-/* File: arm64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xPC
-    mov     x3, xINST
-    bl      MterpInvokeCustomRange
-    cbz     w0, MterpException
-    FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: arm64/op_const_method_handle.S */
-/* File: arm64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- BBBB
-    lsr     w1, wINST, #8               // w1<- AA
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstMethodHandle                     // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     // load rINST
-    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: arm64/op_const_method_type.S */
-/* File: arm64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC
-    FETCH w0, 1                         // w0<- BBBB
-    lsr     w1, wINST, #8               // w1<- AA
-    add     x2, xFP, #OFF_FP_SHADOWFRAME
-    mov     x3, xSELF
-    bl      MterpConstMethodType                     // (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     // load rINST
-    cbnz    w0, MterpPossibleException  // let reference interpreter deal with it.
-    ADVANCE 2                           // advance rPC
-    GET_INST_OPCODE ip                  // extract opcode from rINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-    .balign 128
-/* File: arm64/instruction_end.S */
-
-    .type artMterpAsmInstructionEnd, #object
-    .hidden artMterpAsmInstructionEnd
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: arm64/instruction_start_sister.S */
-
-    .type artMterpAsmSisterStart, #object
-    .hidden artMterpAsmSisterStart
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
-
-/* File: arm64/instruction_end_sister.S */
-
-    .type artMterpAsmSisterEnd, #object
-    .hidden artMterpAsmSisterEnd
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: arm64/footer.S */
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogDivideByZeroException
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogArrayIndexException
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNegativeArraySizeException
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNoSuchMethodException
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogNullObjectException
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogExceptionThrownException
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    ldr  x2, [xSELF, #THREAD_FLAGS_OFFSET]
-    bl MterpLogSuspendFallback
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ldr     x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    cbz     x0, MterpFallback                       // If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    bl      MterpHandleException                    // (self, shadow_frame)
-    cbz     w0, MterpExceptionReturn                // no local catch, back to caller.
-    ldr     x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
-    ldr     w1, [xFP, #OFF_FP_DEX_PC]
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-    add     xPC, x0, x1, lsl #1                     // generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE ip
-    GOTO_OPCODE ip
-    /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    wINST          <= signed offset
- *    wPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    cmp     wINST, #0
-    b.gt    .L_forward_branch           // don't add forward branches to hotness
-    tbnz    wPROFILE, #31, .L_no_count_backwards  // go if negative
-    subs    wPROFILE, wPROFILE, #1      // countdown
-    b.eq    .L_add_batch                // counted down to zero - report
-.L_resume_backward_branch:
-    ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
-    add     w2, wINST, wINST            // w2<- byte offset
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    REFRESH_IBASE
-    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    .L_suspend_request_pending
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    mov     x0, xSELF
-    bl      MterpSuspendCheck           // (self)
-    cbnz    x0, MterpFallback
-    REFRESH_IBASE                       // might have changed during suspend
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_no_count_backwards:
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.ne    .L_resume_backward_branch
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
-    b.eq    .L_check_osr_forward
-.L_resume_forward_branch:
-    add     w2, wINST, wINST            // w2<- byte offset
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-.L_check_osr_forward:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xINST
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    mov     x2, xSELF
-    bl      MterpAddHotnessBatch        // (method, shadow_frame, self)
-    mov     wPROFILE, w0                // restore new hotness countdown to wPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, #2
-    EXPORT_PC
-    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
-    cbnz    x0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-
-/*
- * Check for suspend check request.  Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
-    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
-    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    b.ne    check1
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-check1:
-    EXPORT_PC
-    mov     x0, xSELF
-    bl      MterpSuspendCheck           // (self)
-    cbnz    x0, MterpFallback           // Something in the environment changed, switch interpreters
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    sxtw x2, wINST
-    bl MterpLogOSR
-#endif
-    mov  x0, #1                         // Signal normal return
-    b    MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    mov  x0, xSELF
-    add  x1, xFP, #OFF_FP_SHADOWFRAME
-    bl MterpLogFallback
-#endif
-MterpCommonFallback:
-    mov     x0, #0                                  // signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* xFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    mov     x0, #1                                  // signal return to caller.
-    b MterpDone
-MterpReturn:
-    ldr     x2, [xFP, #OFF_FP_RESULT_REGISTER]
-    str     x0, [x2]
-    mov     x0, #1                                  // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter).  wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmp     wPROFILE, #0
-    bgt     MterpProfileActive                      // if > 0, we may have some counts to report.
-    .cfi_remember_state
-    RESTORE_TWO_REGS                fp, lr, 64
-    RESTORE_TWO_REGS                xPC, xFP, 48
-    RESTORE_TWO_REGS                xSELF, xINST, 32
-    RESTORE_TWO_REGS                xIBASE, xREFS, 16
-    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
-    ret
-    .cfi_restore_state                              // Reset unwind info so following code unwinds.
-    .cfi_def_cfa_offset 80                          // workaround for clang bug: 31975598
-
-MterpProfileActive:
-    mov     xINST, x0                               // stash return value
-    /* Report cached hotness counts */
-    ldr     x0, [xFP, #OFF_FP_METHOD]
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    mov     x2, xSELF
-    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
-    bl      MterpAddHotnessBatch                    // (method, shadow_frame, self)
-    mov     x0, xINST                               // restore return value
-    RESTORE_TWO_REGS                fp, lr, 64
-    RESTORE_TWO_REGS                xPC, xFP, 48
-    RESTORE_TWO_REGS                xSELF, xINST, 32
-    RESTORE_TWO_REGS                xIBASE, xREFS, 16
-    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
-    ret
-
-
-/* File: arm64/instruction_start_alt.S */
-
-    .type artMterpAsmAltInstructionStart, #object
-    .hidden artMterpAsmAltInstructionStart
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (0 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (1 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (2 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (3 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (4 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (5 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (6 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (7 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (8 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (9 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (10 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (11 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (12 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (13 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (14 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (15 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (16 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (17 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (18 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (19 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (20 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (21 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (22 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (23 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (24 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (25 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (26 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (27 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (28 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (29 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (30 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (31 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (32 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (33 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (34 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (35 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (36 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (37 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (38 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (39 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (40 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (41 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (42 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (43 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (44 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (45 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (46 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (47 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (48 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (49 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (50 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (51 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (52 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (53 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (54 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (55 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (56 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (57 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (58 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (59 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (60 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (61 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (62 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (63 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (64 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (65 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (66 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (67 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (68 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (69 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (70 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (71 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (72 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (73 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (74 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (75 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (76 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (77 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (78 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (79 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (80 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (81 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (82 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (83 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (84 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (85 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (86 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (87 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (88 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (89 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (90 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (91 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (92 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (93 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (94 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (95 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (96 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (97 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (98 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (99 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (100 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (101 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (102 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (103 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (104 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (105 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (106 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (107 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (108 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (109 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (110 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (111 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (112 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (113 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (114 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (115 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (116 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (117 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (118 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (119 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (120 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (121 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (122 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (123 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (124 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (125 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (126 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (127 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (128 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (129 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (130 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (131 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (132 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (133 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (134 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (135 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (136 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (137 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (138 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (139 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (140 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (141 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (142 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (143 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (144 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (145 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (146 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (147 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (148 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (149 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (150 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (151 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (152 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (153 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (154 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (155 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (156 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (157 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (158 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (159 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (160 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (161 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (162 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (163 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (164 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (165 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (166 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (167 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (168 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (169 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (170 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (171 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (172 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (173 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (174 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (175 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (176 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (177 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (178 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (179 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (180 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (181 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (182 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (183 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (184 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (185 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (186 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (187 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (188 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (189 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (190 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (191 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (192 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (193 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (194 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (195 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (196 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (197 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (198 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (199 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (200 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (201 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (202 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (203 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (204 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (205 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (206 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (207 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (208 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (209 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (210 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (211 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (212 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (213 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (214 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (215 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (216 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (217 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (218 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (219 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (220 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (221 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (222 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (223 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (224 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (225 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (226 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (227 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (228 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (229 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (230 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (231 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (232 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (233 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (234 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (235 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (236 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (237 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (238 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (239 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (240 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (241 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (242 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (243 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (244 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (245 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (246 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (247 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (248 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (249 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (250 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (251 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (252 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (253 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (254 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    ldr    xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]            // refresh IBASE.
-    adr    lr, artMterpAsmInstructionStart + (255 * 128)       // Addr of primary handler.
-    mov    x0, xSELF
-    add    x1, xFP, #OFF_FP_SHADOWFRAME
-    mov    x2, xPC
-    b      MterpCheckBefore     // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-    .balign 128
-/* File: arm64/instruction_end_alt.S */
-
-    .type artMterpAsmAltInstructionEnd, #object
-    .hidden artMterpAsmAltInstructionEnd
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: arm64/close_cfi.S */
-// Close out the cfi info.  We're treating mterp as a single function.
-
-END ExecuteMterpImpl
-
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
deleted file mode 100644
index 1f5bea0..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ /dev/null
@@ -1,12389 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'mips'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: mips/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2    /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64           /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6    /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
-   reg  nick      purpose
-   s0   rPC       interpreted program counter, used for fetching instructions
-   s1   rFP       interpreted frame pointer, used for accessing locals and args
-   s2   rSELF     self (Thread) pointer
-   s3   rIBASE    interpreted instruction base pointer, used for computed goto
-   s4   rINST     first 16-bit code unit of current instruction
-   s5   rOBJ      object pointer
-   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
-   s7   rTEMP     used as temp storage that can survive a function call
-   s8   rPROFILE  branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4   // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero    $0      /* always zero */
-#define AT      $at     /* assembler temp */
-#define v0      $2      /* return value */
-#define v1      $3
-#define a0      $4      /* argument registers */
-#define a1      $5
-#define a2      $6
-#define a3      $7
-#define t0      $8      /* temp registers (not saved across subroutine calls) */
-#define t1      $9
-#define t2      $10
-#define t3      $11
-#define t4      $12
-#define t5      $13
-#define t6      $14
-#define t7      $15
-#define ta0     $12     /* alias */
-#define ta1     $13
-#define ta2     $14
-#define ta3     $15
-#define s0      $16     /* saved across subroutine calls (callee saved) */
-#define s1      $17
-#define s2      $18
-#define s3      $19
-#define s4      $20
-#define s5      $21
-#define s6      $22
-#define s7      $23
-#define t8      $24     /* two more temp registers */
-#define t9      $25
-#define k0      $26     /* kernel temporary */
-#define k1      $27
-#define gp      $28     /* global pointer */
-#define sp      $29     /* stack pointer */
-#define s8      $30     /* one more callee saved */
-#define ra      $31     /* return address */
-
-/* FP register definitions */
-#define fv0    $f0
-#define fv0f   $f1
-#define fv1    $f2
-#define fv1f   $f3
-#define fa0    $f12
-#define fa0f   $f13
-#define fa1    $f14
-#define fa1f   $f15
-#define ft0    $f4
-#define ft0f   $f5
-#define ft1    $f6
-#define ft1f   $f7
-#define ft2    $f8
-#define ft2f   $f9
-#define ft3    $f10
-#define ft3f   $f11
-#define ft4    $f16
-#define ft4f   $f17
-#define ft5    $f18
-#define ft5f   $f19
-#define fs0    $f20
-#define fs0f   $f21
-#define fs1    $f22
-#define fs1f   $f23
-#define fs2    $f24
-#define fs2f   $f25
-#define fs3    $f26
-#define fs3f   $f27
-#define fs4    $f28
-#define fs4f   $f29
-#define fs5    $f30
-#define fs5f   $f31
-
-#ifndef MIPS32REVGE6
-#define fcc0   $fcc0
-#define fcc1   $fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
-    seb       rd, rt
-#define SEH(rd, rt) \
-    seh       rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    ins       rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
-    sll       rd, rt, 24; \
-    sra       rd, rd, 24
-#define SEH(rd, rt) \
-    sll       rd, rt, 16; \
-    sra       rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    sll       rt_hi, rt_hi, 16; \
-    or        rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mthc1     r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mtc1      r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
-    jic       rt, 0
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    lsa       rd, rs, rt, sa; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#else
-#define JR(rt) \
-    jalr      zero, rt
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    .set      push; \
-    .set      noat; \
-    sll       AT, rs, sa; \
-    addu      rd, AT, rt; \
-    .set      pop; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
-    lw        tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
-    subu      tmp, rPC, tmp; \
-    sra       tmp, tmp, 1; \
-    sw        tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
-    lhu       rINST, ((_count)*2)(rPC); \
-    addu      rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd.  Updates
- * rPC to point to the next instruction.  "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
-    addu      rPC, rPC, rd; \
-    lhu       rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
-    sll       rd, rd, 7; \
-    addu      rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
-    GET_OPCODE_TARGET(rd); \
-    JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
-    .set noat; \
-    EAS2(AT, rFP, rix); \
-    l.s       rd, (AT); \
-    .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    s.s       rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    jalr      zero, dst; \
-    s.s       rlo, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    lw        rd, 0(AT); \
-    .set at
-
-#define STORE_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    sw        rd, 0(AT); \
-    .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
-    sw        rlo, off(rbase); \
-    sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
-    lw        rlo, off(rbase); \
-    lw        rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    sw        AT, (off+4)(rbase); \
-    .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    .set noat; \
-    lw        AT, (off+4)(rbase); \
-    mthc1     AT, rlo; \
-    .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    l.s       rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP    84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
-    STACK_STORE(ra, 124); \
-    STACK_STORE(s8, 120); \
-    STACK_STORE(s0, 116); \
-    STACK_STORE(s1, 112); \
-    STACK_STORE(s2, 108); \
-    STACK_STORE(s3, 104); \
-    STACK_STORE(s4, 100); \
-    STACK_STORE(s5, 96); \
-    STACK_STORE(s6, 92); \
-    STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
-    STACK_LOAD(s7, 88); \
-    STACK_LOAD(s6, 92); \
-    STACK_LOAD(s5, 96); \
-    STACK_LOAD(s4, 100); \
-    STACK_LOAD(s3, 104); \
-    STACK_LOAD(s2, 108); \
-    STACK_LOAD(s1, 112); \
-    STACK_LOAD(s0, 116); \
-    STACK_LOAD(s8, 120); \
-    STACK_LOAD(ra, 124); \
-    DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
-    lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN                 0x80000000
-#define INT_MIN_AS_FLOAT        0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
-#define LONG_MIN_HIGH           0x80000000
-#define LONG_MIN_AS_FLOAT       0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
-
-/* File: mips/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align 2
-    .global ExecuteMterpImpl
-    .ent    ExecuteMterpImpl
-    .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
-    .cfi_startproc
-    .set noreorder
-    .cpload t9
-    .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
-    STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
-    .cprestore STACK_OFFSET_GP
-
-    /* Remember the return register */
-    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sw      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs.
-    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
-    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
-    EAS1(rPC, a1, a0)                             # Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
-    EXPORT_PC()
-
-    /* Starting ibase */
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-    /* Set up for backwards branches & osr profiling */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    JAL(MterpSetUpHotnessCountdown)        # (method, shadow_frame, self)
-    move    rPROFILE, v0                   # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST()                           # load rINST from rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-    /* NOTE: no fallthrough */
-
-/* File: mips/instruction_start.S */
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: mips/op_nop.S */
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: mips/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if 0
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: mips/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH(a1, 1)                           #  a1 <- BBBB
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
-    .endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: mips/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH(a1, 2)                           #  a1 <- BBBB
-    FETCH(a0, 1)                           #  a0 <- AAAA
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
-    .endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: mips/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: mips/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 1)                           #  a3 <- BBBB
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: mips/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 2)                           #  a3 <- BBBB
-    FETCH(a2, 1)                           #  a2 <- AAAA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: mips/op_move_object.S */
-/* File: mips/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if 1
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: mips/op_move_object_from16.S */
-/* File: mips/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH(a1, 1)                           #  a1 <- BBBB
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
-    .endif
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: mips/op_move_object_16.S */
-/* File: mips/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH(a1, 2)                           #  a1 <- BBBB
-    FETCH(a0, 1)                           #  a0 <- AAAA
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
-    .endif
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: mips/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    lw    a0, 0(a0)                        #  a0 <- result.i
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
-    .else
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
-    .endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: mips/op_move_result_wide.S */
-    /* move-result-wide vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: mips/op_move_result_object.S */
-/* File: mips/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    lw    a0, 0(a0)                        #  a0 <- result.i
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
-    .else
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
-    .endif
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: mips/op_move_exception.S */
-    /* move-exception vAA */
-    GET_OPA(a2)                                 #  a2 <- AA
-    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
-    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
-    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    JR(t0)                                      #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: mips/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move      v0, zero
-    move      v1, zero
-    b         MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: mips/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    GET_VREG(v0, a2)                       #  v0 <- vAA
-    move      v1, zero
-    b         MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: mips/op_return_wide.S */
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
-    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
-    b         MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: mips/op_return_object.S */
-/* File: mips/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    GET_VREG(v0, a2)                       #  v0 <- vAA
-    move      v1, zero
-    b         MterpReturn
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: mips/op_const_4.S */
-    /* const/4 vA, +B */
-    sll       a1, rINST, 16                #  a1 <- Bxxx0000
-    GET_OPA(a0)                            #  a0 <- A+
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
-    and       a0, a0, 15
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: mips/op_const_16.S */
-    /* const/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: mips/op_const.S */
-    /* const vAA, +BBBBbbbb */
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: mips/op_const_high16.S */
-    /* const/high16 vAA, +BBBB0000 */
-    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sll       a0, a0, 16                   #  a0 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: mips/op_const_wide_16.S */
-    /* const-wide/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: mips/op_const_wide_32.S */
-    /* const-wide/32 vAA, +BBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: mips/op_const_wide.S */
-    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
-    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
-    FETCH(a3, 4)                           #  a3 <- HHHH (high)
-    GET_OPA(t1)                            #  t1 <- AA
-    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
-    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: mips/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, +BBBB000000000000 */
-    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    li        a0, 0                        #  a0 <- 00000000
-    sll       a1, 16                       #  a1 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: mips/op_const_string.S */
-/* File: mips/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstString)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: mips/op_const_string_jumbo.S */
-    /* const/string vAA, string@BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- bbbb (low)
-    FETCH(a2, 2)                        # a2 <- BBBB (high)
-    GET_OPA(a1)                         # a1 <- AA
-    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(3)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(3)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: mips/op_const_class.S */
-/* File: mips/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstClass)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: mips/op_monitor_enter.S */
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: mips/op_monitor_exit.S */
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: mips/op_check_cast.S */
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- BBBB
-    GET_OPA(a1)                            #  a1 <- AA
-    EAS2(a1, rFP, a1)                      #  a1 <- &object
-    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
-    move   a3, rSELF                       #  a3 <- self
-    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
-    PREFETCH_INST(2)
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: mips/op_instance_of.S */
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC()
-    FETCH(a0, 1)                           # a0 <- CCCC
-    GET_OPB(a1)                            # a1 <- B
-    EAS2(a1, rFP, a1)                      # a1 <- &object
-    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
-    move  a3, rSELF                        # a3 <- self
-    GET_OPA4(rOBJ)                         # rOBJ <- A+
-    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       # load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             # advance rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: mips/op_array_length.S */
-    /*
-     * Return the length of an array.
-     */
-    /* array-length vA, vB */
-    GET_OPB(a1)                            #  a1 <- B
-    GET_OPA4(a2)                           #  a2 <- A+
-    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
-    # is object null?
-    beqz      a0, common_errNullObject     #  yup, fail
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: mips/op_new_instance.S */
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rSELF
-    move   a2, rINST
-    JAL(MterpNewInstance)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: mips/op_new_array.S */
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    move   a3, rSELF
-    JAL(MterpNewArray)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: mips/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
-    move   a1, rPC
-    move   a2, rSELF
-    JAL(MterpFilledNewArray)                           #  v0 <- helper(shadow_frame, pc, self)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: mips/op_filled_new_array_range.S */
-/* File: mips/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
-    move   a1, rPC
-    move   a2, rSELF
-    JAL(MterpFilledNewArrayRange)                           #  v0 <- helper(shadow_frame, pc, self)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: mips/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
-    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
-    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
-    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
-    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: mips/op_throw.S */
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC()                              #  exception handler can throw
-    GET_OPA(a2)                              #  a2 <- AA
-    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
-    # null object?
-    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
-    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
-    b         MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: mips/op_goto.S */
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sll       a0, rINST, 16                #  a0 <- AAxx0000
-    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: mips/op_goto_16.S */
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: mips/op_goto_32.S */
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
-    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
-    b         MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: mips/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_VREG(a1, a3)                       #  a1 <- vAA
-    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
-    JAL(MterpDoPackedSwitch)                             #  a0 <- code-unit branch offset
-    move      rINST, v0
-    b         MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: mips/op_sparse_switch.S */
-/* File: mips/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_VREG(a1, a3)                       #  a1 <- vAA
-    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
-    JAL(MterpDoSparseSwitch)                             #  a0 <- code-unit branch offset
-    move      rINST, v0
-    b         MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: mips/op_cmpl_float.S */
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * for: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8
-    GET_VREG_F(ft0, a2)
-    GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
-    cmp.eq.s  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if 0
-    cmp.lt.s  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.s    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if 0
-    c.olt.s   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.s   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: mips/op_cmpg_float.S */
-/* File: mips/op_cmpl_float.S */
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * for: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8
-    GET_VREG_F(ft0, a2)
-    GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
-    cmp.eq.s  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if 1
-    cmp.lt.s  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.s    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if 1
-    c.olt.s   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.s   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: mips/op_cmpl_double.S */
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  rOBJ <- BB
-    srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
-    LOAD64_F(ft0, ft0f, rOBJ)
-    LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
-    cmp.eq.d  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if 0
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.d    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if 0
-    c.olt.d   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.d   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: mips/op_cmpg_double.S */
-/* File: mips/op_cmpl_double.S */
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  rOBJ <- BB
-    srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
-    LOAD64_F(ft0, ft0f, rOBJ)
-    LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
-    cmp.eq.d  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if 1
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.d    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if 1
-    c.olt.d   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.d   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: mips/op_cmp_long.S */
-    /*
-     * Compare two 64-bit values
-     *    x = y     return  0
-     *    x < y     return -1
-     *    x > y     return  1
-     *
-     * I think I can improve on the ARM code by the following observation
-     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
-     *    sgt   t1,  x.hi, y.hi;        # (y.hi > x.hi) ? 1:0
-     *    subu  v0, t0, t1              # v0= -1:1:0 for [ < > = ]
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    slt       t0, a1, a3                   #  compare hi
-    sgt       t1, a1, a3
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
-    bnez      v0, .Lop_cmp_long_finish
-    # at this point x.hi==y.hi
-    sltu      t0, a0, a2                   #  compare lo
-    sgtu      t1, a0, a2
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
-
-.Lop_cmp_long_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: mips/op_if_eq.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    beq a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: mips/op_if_ne.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    bne a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: mips/op_if_lt.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    blt a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: mips/op_if_ge.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    bge a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: mips/op_if_gt.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    bgt a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: mips/op_if_le.S */
-/* File: mips/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    ble a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: mips/op_if_eqz.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    beq a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: mips/op_if_nez.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    bne a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: mips/op_if_ltz.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    blt a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: mips/op_if_gez.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    bge a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: mips/op_if_gtz.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    bgt a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: mips/op_if_lez.S */
-/* File: mips/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    ble a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: mips/op_unused_3e.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: mips/op_unused_3f.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: mips/op_unused_40.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: mips/op_unused_41.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: mips/op_unused_42.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: mips/op_unused_43.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: mips/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: mips/op_aget_wide.S */
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: mips/op_aget_object.S */
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: mips/op_aget_boolean.S */
-/* File: mips/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: mips/op_aget_byte.S */
-/* File: mips/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: mips/op_aget_char.S */
-/* File: mips/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: mips/op_aget_short.S */
-/* File: mips/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: mips/op_aput.S */
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: mips/op_aput_wide.S */
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t0)                            #  t0 <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
-    # compare unsigned index, length
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    JR(t0)                                 #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: mips/op_aput_object.S */
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpAputObject)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: mips/op_aput_boolean.S */
-/* File: mips/op_aput.S */
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: mips/op_aput_byte.S */
-/* File: mips/op_aput.S */
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: mips/op_aput_char.S */
-/* File: mips/op_aput.S */
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: mips/op_aput_short.S */
-/* File: mips/op_aput.S */
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: mips/op_iget_wide.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: mips/op_iget_object.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: mips/op_iget_boolean.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: mips/op_iget_byte.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: mips/op_iget_char.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: mips/op_iget_short.S */
-/* File: mips/op_iget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: mips/op_iput_wide.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: mips/op_iput_object.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: mips/op_iput_boolean.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: mips/op_iput_byte.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: mips/op_iput_char.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: mips/op_iput_short.S */
-/* File: mips/op_iput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: mips/op_sget_wide.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: mips/op_sget_object.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: mips/op_sget_boolean.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: mips/op_sget_byte.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: mips/op_sget_char.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: mips/op_sget_short.S */
-/* File: mips/op_sget.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: mips/op_sput_wide.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: mips/op_sput_object.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: mips/op_sput_boolean.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: mips/op_sput_byte.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: mips/op_sput_char.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: mips/op_sput_short.S */
-/* File: mips/op_sput.S */
-/* File: mips/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: mips/op_invoke_virtual.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeVirtual)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: mips/op_invoke_super.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeSuper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: mips/op_invoke_direct.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeDirect)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: mips/op_invoke_static.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeStatic)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: mips/op_invoke_interface.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeInterface)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: mips/op_return_void_no_barrier.S */
-    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
-    move   a0, rSELF
-    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz   ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move   v0, zero
-    move   v1, zero
-    b      MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: mips/op_invoke_virtual_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeVirtualRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: mips/op_invoke_super_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeSuperRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: mips/op_invoke_direct_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeDirectRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: mips/op_invoke_static_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeStaticRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: mips/op_invoke_interface_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeInterfaceRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: mips/op_unused_79.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: mips/op_unused_7a.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: mips/op_neg_int.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    negu a0, a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: mips/op_not_int.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    not a0, a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: mips/op_neg_long.S */
-/* File: mips/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    negu v0, a0                              #  optional op
-    negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: mips/op_not_long.S */
-/* File: mips/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    not a0, a0                              #  optional op
-    not a1, a1                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: mips/op_neg_float.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    addu a0, a0, 0x80000000                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: mips/op_neg_double.S */
-/* File: mips/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    addu a1, a1, 0x80000000                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: mips/op_int_to_long.S */
-/* File: mips/unopWider.S */
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result0/result1 = op a0".
-     *
-     * For: int-to-long
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    sra a1, a0, 31                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: mips/op_int_to_float.S */
-/* File: mips/funop.S */
-    /*
-     * Generic 32-bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.w fv0, fa0
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: mips/op_int_to_double.S */
-/* File: mips/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.w fv0, fa0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: mips/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: mips/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if 0
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: mips/op_long_to_float.S */
-    /*
-     * long-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdisf)
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: mips/op_long_to_double.S */
-    /*
-     * long-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: mips/op_float_to_int.S */
-    /*
-     * float-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.s    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
-    trunc.w.s fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: mips/op_float_to_long.S */
-    /*
-     * float-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.s fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.s    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .Lop_float_to_long_get_opcode
-
-    li        t0, LONG_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .Lop_float_to_long_get_opcode
-
-    neg.s     fa1, fa1
-    c.ole.s   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .Lop_float_to_long_get_opcode
-
-    JAL(__fixsfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .Lop_float_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: mips/op_float_to_double.S */
-/* File: mips/funopWider.S */
-    /*
-     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.s fv0, fa0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: mips/op_double_to_int.S */
-    /*
-     * double-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.d    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
-    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
-    trunc.w.d fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: mips/op_double_to_long.S */
-    /*
-     * double-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.d fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.d    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .Lop_double_to_long_get_opcode
-
-    li        t0, LONG_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .Lop_double_to_long_get_opcode
-
-    neg.d     fa1, fa1
-    c.ole.d   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .Lop_double_to_long_get_opcode
-
-    JAL(__fixdfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .Lop_double_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: mips/op_double_to_float.S */
-/* File: mips/unopNarrower.S */
-    /*
-     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: double-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.d fv0, fa0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: mips/op_int_to_byte.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    SEB(a0, a0)                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: mips/op_int_to_char.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    and a0, 0xffff                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: mips/op_int_to_short.S */
-/* File: mips/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    SEH(a0, a0)                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: mips/op_add_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: mips/op_sub_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: mips/op_mul_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: mips/op_div_int.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#else
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    div zero, a0, a1                              #  optional op
-    mflo a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: mips/op_rem_int.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#else
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    div zero, a0, a1                              #  optional op
-    mfhi a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: mips/op_and_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: mips/op_or_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: mips/op_xor_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: mips/op_shl_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: mips/op_shr_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: mips/op_ushr_int.S */
-/* File: mips/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-                                  #  optional op
-    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: mips/op_add_long.S */
-/*
- *  The compiler generates the following sequence for
- *  [v1 v0] =  [a1 a0] + [a3 a2];
- *    addu v0,a2,a0
- *    addu a1,a3,a1
- *    sltu v1,v0,a2
- *    addu v1,v1,a1
- */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    addu v0, a2, a0                              #  optional op
-    addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: mips/op_sub_long.S */
-/*
- * For little endian the code sequence looks as follows:
- *    subu    v0,a0,a2
- *    subu    v1,a1,a3
- *    sltu    a0,a0,v0
- *    subu    v1,v1,a0
- */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    subu v0, a0, a2                              #  optional op
-    subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: mips/op_mul_long.S */
-    /*
-     * Signed 64-bit integer multiply.
-     *         a1   a0
-     *   x     a3   a2
-     *   -------------
-     *       a2a1 a2a0
-     *       a3a0
-     *  a3a1 (<= unused)
-     *  ---------------
-     *         v1   v0
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       t0, a0, 255                  #  a2 <- BB
-    srl       t1, a0, 8                    #  a3 <- CC
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
-
-    EAS2(t1, rFP, t1)                      #  t0 <- &fp[CC]
-    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
-#endif
-    mul       t0, a2, a1                   #  t0= a2a1
-    addu      v1, v1, t1                   #  v1+= hi(a2a0)
-    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
-
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    b         .Lop_mul_long_finish
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: mips/op_div_long.S */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 1
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: mips/op_rem_long.S */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 1
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: mips/op_and_long.S */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    and a0, a0, a2                              #  optional op
-    and a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: mips/op_or_long.S */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    or a0, a0, a2                              #  optional op
-    or a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: mips/op_xor_long.S */
-/* File: mips/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    xor a0, a0, a2                              #  optional op
-    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: mips/op_shl_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t2)                            #  t2 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .Lop_shl_long_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: mips/op_shr_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t3)                            #  t3 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .Lop_shr_long_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: mips/op_ushr_long.S */
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .Lop_ushr_long_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: mips/op_add_float.S */
-/* File: mips/fbinop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    add.s fv0, fa0, fa1                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: mips/op_sub_float.S */
-/* File: mips/fbinop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    sub.s fv0, fa0, fa1                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: mips/op_mul_float.S */
-/* File: mips/fbinop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    mul.s fv0, fa0, fa1                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: mips/op_div_float.S */
-/* File: mips/fbinop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    div.s fv0, fa0, fa1                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: mips/op_rem_float.S */
-/* File: mips/fbinop.S */
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    JAL(fmodf)                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: mips/op_add_double.S */
-/* File: mips/fbinopWide.S */
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    add.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: mips/op_sub_double.S */
-/* File: mips/fbinopWide.S */
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    sub.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: mips/op_mul_double.S */
-/* File: mips/fbinopWide.S */
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    mul.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: mips/op_div_double.S */
-/* File: mips/fbinopWide.S */
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    div.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: mips/op_rem_double.S */
-/* File: mips/fbinopWide.S */
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    JAL(fmod)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: mips/op_add_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: mips/op_sub_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: mips/op_mul_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: mips/op_div_int_2addr.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#else
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mflo a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: mips/op_rem_int_2addr.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#else
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mfhi a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: mips/op_and_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: mips/op_or_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: mips/op_xor_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: mips/op_shl_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: mips/op_shr_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: mips/op_ushr_int_2addr.S */
-/* File: mips/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    srl a0, a0, a1                                  #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: mips/op_add_long_2addr.S */
-/*
- * See op_add_long.S for details
- */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    addu v0, a2, a0                              #  optional op
-    addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: mips/op_sub_long_2addr.S */
-/*
- * See op_sub_long.S for more details
- */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    subu v0, a0, a2                              #  optional op
-    subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: mips/op_mul_long_2addr.S */
-    /*
-     * See op_mul_long.S for more details
-     */
-    /* mul-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  vAA.low / high
-
-    GET_OPB(t1)                            #  t1 <- B
-    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
-    LOAD64(a2, a3, t1)                     #  vBB.low / high
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
- #endif
-    mul       t2, a2, a1                   #  t2= a2a1
-    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
-    addu      v1, v1, t2                   #  v1= v1 + a2a1;
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: mips/op_div_long_2addr.S */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 1
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: mips/op_rem_long_2addr.S */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 1
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: mips/op_and_long_2addr.S */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    and a0, a0, a2                              #  optional op
-    and a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: mips/op_or_long_2addr.S */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    or a0, a0, a2                              #  optional op
-    or a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: mips/op_xor_long_2addr.S */
-/* File: mips/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
-    .if 0
-    or        t0, a2, a3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    xor a0, a0, a2                              #  optional op
-    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: mips/op_shl_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .Lop_shl_long_2addr_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: mips/op_shr_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    GET_OPA4(t2)                           #  t2 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .Lop_shr_long_2addr_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips/op_ushr_long_2addr.S */
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    GET_OPA4(t3)                           #  t3 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .Lop_ushr_long_2addr_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: mips/op_add_float_2addr.S */
-/* File: mips/fbinop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    add.s fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: mips/op_sub_float_2addr.S */
-/* File: mips/fbinop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    sub.s fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: mips/op_mul_float_2addr.S */
-/* File: mips/fbinop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    mul.s fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: mips/op_div_float_2addr.S */
-/* File: mips/fbinop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    div.s fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: mips/op_rem_float_2addr.S */
-/* File: mips/fbinop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    JAL(fmodf)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: mips/op_add_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    add.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: mips/op_sub_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sub.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: mips/op_mul_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    mul.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: mips/op_div_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    div.d fv0, fa0, fa1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: mips/op_rem_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(fmod)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: mips/op_add_int_lit16.S */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: mips/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: mips/op_mul_int_lit16.S */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: mips/op_div_int_lit16.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 1
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#else
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 1
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mflo a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: mips/op_rem_int_lit16.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 1
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#else
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 1
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mfhi a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: mips/op_and_int_lit16.S */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: mips/op_or_int_lit16.S */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: mips/op_xor_int_lit16.S */
-/* File: mips/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if 0
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: mips/op_add_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips/op_rsub_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: mips/op_mul_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: mips/op_div_int_lit8.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#else
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mflo a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: mips/op_rem_int_lit8.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#else
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 1
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    div zero, a0, a1                              #  optional op
-    mfhi a0                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-#endif
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: mips/op_and_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: mips/op_or_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: mips/op_xor_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: mips/op_shl_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: mips/op_shr_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips/op_ushr_int_lit8.S */
-/* File: mips/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if 0
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-                                  #  optional op
-    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: mips/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    lw     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: mips/op_iget_wide_quick.S */
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1                   #  t0 <- a3 + a1
-    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: mips/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
-    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
-    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    GET_OPA4(a2)                           #  a2<- A+
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a3, MterpPossibleException        #  bail out
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: mips/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    sw    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: mips/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    GET_OPA4(a0)                           #  a0 <- A(+)
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
-    # check object for null
-    beqz      a2, common_errNullObject     #  object was null
-    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
-    FETCH(a3, 1)                           #  a3 <- field byte offset
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      a2, a2, a3                   #  obj.field (64 bits, aligned) <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
-    JR(t0)                                 #  jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: mips/op_iput_object_quick.S */
-    /* For: iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpIputObjectQuick)
-    beqz   v0, MterpException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips/op_invoke_virtual_quick.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeVirtualQuick)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips/op_invoke_virtual_range_quick.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeVirtualQuickRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: mips/op_iput_boolean_quick.S */
-/* File: mips/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    sb    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: mips/op_iput_byte_quick.S */
-/* File: mips/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    sb    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: mips/op_iput_char_quick.S */
-/* File: mips/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    sh    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: mips/op_iput_short_quick.S */
-/* File: mips/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    sh    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: mips/op_iget_boolean_quick.S */
-/* File: mips/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    lbu     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: mips/op_iget_byte_quick.S */
-/* File: mips/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    lb     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: mips/op_iget_char_quick.S */
-/* File: mips/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    lhu     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: mips/op_iget_short_quick.S */
-/* File: mips/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    lh     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: mips/op_unused_f3.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: mips/op_unused_f4.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: mips/op_unused_f5.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: mips/op_unused_f6.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: mips/op_unused_f7.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: mips/op_unused_f8.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: mips/op_unused_f9.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: mips/op_invoke_polymorphic.S */
-/* File: mips/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokePolymorphic)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(4)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips/op_invoke_polymorphic_range.S */
-/* File: mips/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokePolymorphicRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(4)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: mips/op_invoke_custom.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeCustom)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: mips/op_invoke_custom_range.S */
-/* File: mips/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL(MterpInvokeCustomRange)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: mips/op_const_method_handle.S */
-/* File: mips/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstMethodHandle)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: mips/op_const_method_type.S */
-/* File: mips/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstMethodType)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-
-    .balign 128
-/* File: mips/instruction_end.S */
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: mips/instruction_start_sister.S */
-
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
-
-
-/* continuation for op_float_to_long */
-
-#ifndef MIPS32REVGE6
-.Lop_float_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_float_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-/* continuation for op_double_to_long */
-
-#ifndef MIPS32REVGE6
-.Lop_double_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_double_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-/* continuation for op_mul_long */
-
-.Lop_mul_long_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
-
-/* continuation for op_shl_long */
-
-.Lop_shl_long_finish:
-    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
-
-/* continuation for op_shr_long */
-
-.Lop_shr_long_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/VAA+1 <- rlo/rhi
-
-/* continuation for op_ushr_long */
-
-.Lop_ushr_long_finish:
-    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
-
-/* continuation for op_shl_long_2addr */
-
-.Lop_shl_long_2addr_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
-
-/* continuation for op_shr_long_2addr */
-
-.Lop_shr_long_2addr_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
-
-/* continuation for op_ushr_long_2addr */
-
-.Lop_ushr_long_2addr_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
-/* File: mips/instruction_end_sister.S */
-
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: mips/instruction_start_alt.S */
-
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (0 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (1 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (2 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (3 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (4 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (5 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (6 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (7 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (8 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (9 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (10 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (11 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (12 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (13 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (14 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (15 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (16 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (17 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (18 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (19 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (20 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (21 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (22 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (23 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (24 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (25 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (26 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (27 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (28 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (29 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (30 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (31 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (32 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (33 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (34 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (35 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (36 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (37 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (38 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (39 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (40 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (41 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (42 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (43 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (44 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (45 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (46 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (47 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (48 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (49 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (50 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (51 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (52 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (53 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (54 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (55 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (56 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (57 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (58 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (59 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (60 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (61 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (62 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (63 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (64 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (65 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (66 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (67 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (68 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (69 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (70 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (71 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (72 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (73 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (74 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (75 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (76 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (77 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (78 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (79 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (80 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (81 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (82 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (83 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (84 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (85 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (86 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (87 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (88 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (89 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (90 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (91 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (92 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (93 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (94 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (95 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (96 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (97 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (98 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (99 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (100 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (101 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (102 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (103 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (104 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (105 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (106 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (107 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (108 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (109 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (110 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (111 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (112 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (113 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (114 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (115 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (116 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (117 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (118 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (119 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (120 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (121 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (122 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (123 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (124 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (125 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (126 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (127 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (128 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (129 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (130 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (131 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (132 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (133 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (134 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (135 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (136 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (137 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (138 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (139 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (140 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (141 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (142 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (143 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (144 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (145 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (146 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (147 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (148 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (149 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (150 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (151 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (152 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (153 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (154 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (155 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (156 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (157 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (158 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (159 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (160 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (161 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (162 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (163 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (164 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (165 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (166 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (167 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (168 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (169 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (170 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (171 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (172 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (173 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (174 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (175 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (176 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (177 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (178 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (179 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (180 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (181 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (182 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (183 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (184 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (185 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (186 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (187 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (188 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (189 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (190 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (191 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (192 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (193 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (194 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (195 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (196 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (197 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (198 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (199 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (200 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (201 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (202 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (203 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (204 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (205 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (206 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (207 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (208 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (209 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (210 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (211 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (212 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (213 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (214 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (215 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (216 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (217 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (218 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (219 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (220 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (221 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (222 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (223 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (224 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (225 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (226 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (227 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (228 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (229 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (230 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (231 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (232 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (233 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (234 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (235 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (236 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (237 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (238 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (239 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (240 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (241 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (242 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (243 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (244 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (245 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (246 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (247 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (248 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (249 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (250 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (251 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (252 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (253 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (254 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.    Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    la     ra, artMterpAsmInstructionStart + (255 * 128)   # Addr of primary handler
-    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-    .balign 128
-/* File: mips/instruction_end_alt.S */
-
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: mips/footer.S */
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogDivideByZeroException)
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogArrayIndexException)
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNegativeArraySizeException)
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNoSuchMethodException)
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNullObjectException)
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogExceptionThrownException)
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
-    JAL(MterpLogSuspendFallback)
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqz    a0, MterpFallback          # If exception, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpHandleException)                    # (self, shadow_frame)
-    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
-    lw      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lw      a1, OFF_FP_DEX_PC(rFP)
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-    EAS1(rPC, a0, a1)                            # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC()
-    FETCH_INST()
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
-    bgtz    rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    li      t0, JIT_CHECK_OSR
-    beq     rPROFILE, t0, .L_osr_check
-    blt     rPROFILE, t0, .L_resume_backward_branch
-    subu    rPROFILE, 1
-    beqz    rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE()
-    addu    a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnez    ra, .L_suspend_request_pending
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC()
-    move    a0, rSELF
-    JAL(MterpSuspendCheck)              # (self)
-    bnez    v0, MterpFallback
-    REFRESH_IBASE()                     # might have changed during suspend
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_no_count_backwards:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bne     rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beq     rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
-    add     a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    lw      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST(2)
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    JAL(MterpLogOSR)
-#endif
-    li      v0, 1                       # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    move    v0, zero                    # signal retry with reference interpreter.
-    b       MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                       # signal return to caller.
-    b       MterpDone
-MterpReturn:
-    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sw      v0, 0(a2)
-    sw      v1, 4(a2)
-    li      v0, 1                       # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
-    STACK_LOAD_FULL()
-    jalr    zero, ra
-
-    .cfi_endproc
-    .end ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
deleted file mode 100644
index 40a8396..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ /dev/null
@@ -1,11986 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'mips64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: mips64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $0  /* always zero */
-#define AT   $at /* assembler temp */
-#define v0   $2  /* return value */
-#define v1   $3
-#define a0   $4  /* argument registers */
-#define a1   $5
-#define a2   $6
-#define a3   $7
-#define a4   $8  /* expanded register arguments */
-#define a5   $9
-#define a6   $10
-#define a7   $11
-#define ta0  $8  /* alias */
-#define ta1  $9
-#define ta2  $10
-#define ta3  $11
-#define t0   $12 /* temp registers (not saved across subroutine calls) */
-#define t1   $13
-#define t2   $14
-#define t3   $15
-
-#define s0   $16 /* saved across subroutine calls (callee saved) */
-#define s1   $17
-#define s2   $18
-#define s3   $19
-#define s4   $20
-#define s5   $21
-#define s6   $22
-#define s7   $23
-#define t8   $24 /* two more temp registers */
-#define t9   $25
-#define k0   $26 /* kernel temporary */
-#define k1   $27
-#define gp   $28 /* global pointer */
-#define sp   $29 /* stack pointer */
-#define s8   $30 /* one more callee saved */
-#define ra   $31 /* return address */
-
-#define f0   $f0
-#define f1   $f1
-#define f2   $f2
-#define f3   $f3
-#define f12  $f12
-#define f13  $f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
-    bgec    \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
-    bltc    \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  s0  rPC       interpreted program counter, used for fetching instructions
-  s1  rFP       interpreted frame pointer, used for accessing locals and args
-  s2  rSELF     self (Thread) pointer
-  s3  rINST     first 16-bit code unit of current instruction
-  s4  rIBASE    interpreted instruction base pointer, used for computed goto
-  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  s6  rPROFILE  jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      s0
-#define CFI_DEX  16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP  4   // DWARF register number of the first argument register (a0).
-#define rFP      s1
-#define rSELF    s2
-#define rINST    s3
-#define rIBASE   s4
-#define rREFS    s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    lhu     rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-    daddu   rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    daddu   rPC, rPC, \reg
-    FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ADVANCE \count
-    FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    lhu     rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
-    .set noat
-    sll     AT, \reg, 7
-    daddu   AT, rIBASE, AT
-    jic     AT, 0
-    .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_U reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwu     \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    lw      AT, 4(AT)
-    dinsu   \reg, AT, 32, 32
-    .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    lw      AT, 4(AT)
-    mthc1   AT, \reg
-    .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    drotr32 \reg, \reg, 0
-    sw      \reg, 4(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    mfhc1   \vreg, \reg
-    sw      \vreg, 4(AT)
-    .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE      80    /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN             0x80000000
-#define INT_MIN_AS_FLOAT    0xCF000000
-#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
-#define LONG_MIN            0x8000000000000000
-#define LONG_MIN_AS_FLOAT   0xDF000000
-#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
-
-/* File: mips64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
-    .set    reorder
-
-    .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
-    .balign 16
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-ExecuteMterpImpl:
-    .cfi_startproc
-    .cpsetup t9, t8, ExecuteMterpImpl
-
-    .cfi_def_cfa sp, 0
-    daddu   sp, sp, -STACK_SIZE
-    .cfi_adjust_cfa_offset STACK_SIZE
-
-    sd      t8, STACK_OFFSET_GP(sp)
-    .cfi_rel_offset 28, STACK_OFFSET_GP
-    sd      ra, STACK_OFFSET_RA(sp)
-    .cfi_rel_offset 31, STACK_OFFSET_RA
-
-    sd      s0, STACK_OFFSET_S0(sp)
-    .cfi_rel_offset 16, STACK_OFFSET_S0
-    sd      s1, STACK_OFFSET_S1(sp)
-    .cfi_rel_offset 17, STACK_OFFSET_S1
-    sd      s2, STACK_OFFSET_S2(sp)
-    .cfi_rel_offset 18, STACK_OFFSET_S2
-    sd      s3, STACK_OFFSET_S3(sp)
-    .cfi_rel_offset 19, STACK_OFFSET_S3
-    sd      s4, STACK_OFFSET_S4(sp)
-    .cfi_rel_offset 20, STACK_OFFSET_S4
-    sd      s5, STACK_OFFSET_S5(sp)
-    .cfi_rel_offset 21, STACK_OFFSET_S5
-    sd      s6, STACK_OFFSET_S6(sp)
-    .cfi_rel_offset 22, STACK_OFFSET_S6
-
-    /* Remember the return register */
-    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sd      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
-    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    dlsa    rREFS, v0, rFP, 2
-    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
-    dlsa    rPC, v0, a1, 1
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* Set up for backwards branches & osr profiling */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    jal     MterpSetUpHotnessCountdown
-    move    rPROFILE, v0                # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /* NOTE: no fallthrough */
-
-/* File: mips64/instruction_start.S */
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: mips64/op_nop.S */
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: mips64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: mips64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: mips64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAAAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: mips64/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    ext     a2, rINST, 8, 4             # a2 <- A
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: mips64/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: mips64/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: mips64/op_move_object.S */
-/* File: mips64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: mips64/op_move_object_from16.S */
-/* File: mips64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: mips64/op_move_object_16.S */
-/* File: mips64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAAAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: mips64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    lw      a0, 0(a0)                   # a0 <- result.i
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT a0, a2              # vAA <- result
-    .else
-    SET_VREG a0, a2                     # vAA <- result
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: mips64/op_move_result_wide.S */
-    /* for: move-result-wide */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    ld      a0, 0(a0)                   # a0 <- result.j
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: mips64/op_move_result_object.S */
-/* File: mips64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    lw      a0, 0(a0)                   # a0 <- result.i
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 1
-    SET_VREG_OBJECT a0, a2              # vAA <- result
-    .else
-    SET_VREG a0, a2                     # vAA <- result
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: mips64/op_move_exception.S */
-    /* move-exception vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: mips64/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: mips64/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return (sign-extend), return-object (zero-extend)
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG  a0, a2                      # a0 <- vAA
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: mips64/op_return_wide.S */
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vAA
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: mips64/op_return_object.S */
-/* File: mips64/op_return.S */
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return (sign-extend), return-object (zero-extend)
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U  a0, a2                      # a0 <- vAA
-    b       MterpReturn
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: mips64/op_const_4.S */
-    /* const/4 vA, #+B */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    seh     a0, rINST                   # sign extend B in rINST
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    sra     a0, a0, 12                  # shift B into its final position
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- +B
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: mips64/op_const_16.S */
-    /* const/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: mips64/op_const.S */
-    /* const vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: mips64/op_const_high16.S */
-    /* const/high16 vAA, #+BBBB0000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    sll     a0, a0, 16                  # a0 <- BBBB0000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB0000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: mips64/op_const_wide_16.S */
-    /* const-wide/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: mips64/op_const_wide_32.S */
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: mips64/op_const_wide.S */
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    srl     a4, rINST, 8                # a4 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
-    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
-    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
-    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
-    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: mips64/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: mips64/op_const_string.S */
-/* File: mips64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstString                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: mips64/op_const_string_jumbo.S */
-    /* const/string vAA, String//BBBBBBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
-    srl     a1, rINST, 8                # a1 <- AA
-    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 3                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: mips64/op_const_class.S */
-/* File: mips64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstClass                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: mips64/op_monitor_enter.S */
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    .extern artLockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artLockObjectFromCode
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: mips64/op_monitor_exit.S */
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    .extern artUnlockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: mips64/op_check_cast.S */
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    .extern MterpCheckCast
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpCheckCast              # (index, &obj, method, self)
-    PREFETCH_INST 2
-    bnez    v0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: mips64/op_instance_of.S */
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    .extern MterpInstanceOf
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- CCCC
-    srl     a1, rINST, 12               # a1 <- B
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpInstanceOf             # (index, &obj, method, self)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    ADVANCE 2                           # advance rPC
-    SET_VREG v0, a2                     # vA <- v0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: mips64/op_array_length.S */
-    /*
-     * Return the length of an array.
-     */
-    srl     a1, rINST, 12               # a1 <- B
-    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a0, common_errNullObject    # yup, fail
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a3, a2                     # vB <- length
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: mips64/op_new_instance.S */
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    .extern MterpNewInstance
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rSELF
-    move    a2, rINST
-    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: mips64/op_new_array.S */
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    .extern MterpNewArray
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    move    a3, rSELF
-    jal     MterpNewArray
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: mips64/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rSELF
-    jal     MterpFilledNewArray
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: mips64/op_filled_new_array_range.S */
-/* File: mips64/op_filled_new_array.S */
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rSELF
-    jal     MterpFilledNewArrayRange
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: mips64/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    .extern MterpFillArrayData
-    EXPORT_PC
-    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
-    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
-    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
-    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
-    jal     MterpFillArrayData          # (obj, payload)
-    beqzc   v0, MterpPossibleException  # exception?
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: mips64/op_throw.S */
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
-    beqzc   a0, common_errNullObject
-    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
-    b       MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: mips64/op_goto.S */
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    srl     rINST, rINST, 8
-    seb     rINST, rINST                # rINST <- offset (sign-extended AA)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: mips64/op_goto_16.S */
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended AAAA)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: mips64/op_goto_32.S */
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    lh      rINST, 2(rPC)               # rINST <- aaaa (low)
-    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
-    ins     rINST, a1, 16, 16           # rINST <- offset (sign-extended AAAAaaaa)
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: mips64/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBBBBBB */
-    .extern MterpDoPackedSwitch
-    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
-    GET_VREG a1, a3                     # a1 <- vAA
-    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
-    jal     MterpDoPackedSwitch                       # v0 <- code-unit branch offset
-    move    rINST, v0
-    b       MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: mips64/op_sparse_switch.S */
-/* File: mips64/op_packed_switch.S */
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBBBBBB */
-    .extern MterpDoSparseSwitch
-    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
-    GET_VREG a1, a3                     # a1 <- vAA
-    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
-    jal     MterpDoSparseSwitch                       # v0 <- code-unit branch offset
-    move    rINST, v0
-    b       MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: mips64/op_cmpl_float.S */
-/* File: mips64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    cmp.eq.s f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if 0
-    cmp.lt.s f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.s f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: mips64/op_cmpg_float.S */
-/* File: mips64/fcmp.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    cmp.eq.s f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if 1
-    cmp.lt.s f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.s f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: mips64/op_cmpl_double.S */
-/* File: mips64/fcmpWide.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    cmp.eq.d f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if 0
-    cmp.lt.d f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.d f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: mips64/op_cmpg_double.S */
-/* File: mips64/fcmpWide.S */
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    cmp.eq.d f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if 1
-    cmp.lt.d f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.d f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: mips64/op_cmp_long.S */
-    /* cmp-long vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    slt     a2, a0, a1
-    slt     a0, a1, a0
-    subu    a0, a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: mips64/op_if_eq.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    beqc a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: mips64/op_if_ne.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    bnec a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: mips64/op_if_lt.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    bltc a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: mips64/op_if_ge.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    bgec a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: mips64/op_if_gt.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    bgtc a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: mips64/op_if_le.S */
-/* File: mips64/bincmp.S */
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    blec a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: mips64/op_if_eqz.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    beqzc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: mips64/op_if_nez.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    bnezc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: mips64/op_if_ltz.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    bltzc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: mips64/op_if_gez.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    bgezc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: mips64/op_if_gtz.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    bgtzc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: mips64/op_if_lez.S */
-/* File: mips64/zcmp.S */
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    blezc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: mips64/op_unused_3e.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: mips64/op_unused_3f.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: mips64/op_unused_40.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: mips64/op_unused_41.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: mips64/op_unused_42.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: mips64/op_unused_43.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: mips64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 2
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 2          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lw   a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: mips64/op_aget_wide.S */
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
-    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a2, a4                # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: mips64/op_aget_object.S */
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    .extern artAGetObjectFromMterp
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    jal     artAGetObjectFromMterp      # (array, index)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    srl     a4, rINST, 8                # a4 <- AA
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    SET_VREG_OBJECT v0, a4              # vAA <- v0
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: mips64/op_aget_boolean.S */
-/* File: mips64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 0
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 0          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lbu   a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: mips64/op_aget_byte.S */
-/* File: mips64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 0
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 0          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lb   a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: mips64/op_aget_char.S */
-/* File: mips64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 1
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 1          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lhu   a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: mips64/op_aget_short.S */
-/* File: mips64/op_aget.S */
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 1
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 1          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lh   a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: mips64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 2
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 2          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sw  a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: mips64/op_aput_wide.S */
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    GET_VREG_WIDE a2, a4                # a2 <- vAA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    dsrl32  a2, a2, 0
-    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: mips64/op_aput_object.S */
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    .extern MterpAputObject
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpAputObject
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: mips64/op_aput_boolean.S */
-/* File: mips64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 0
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 0          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sb  a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: mips64/op_aput_byte.S */
-/* File: mips64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 0
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 0          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sb  a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: mips64/op_aput_char.S */
-/* File: mips64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 1
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 1          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sh  a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: mips64/op_aput_short.S */
-/* File: mips64/op_aput.S */
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if 1
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, 1          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sh  a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: mips64/op_iget_wide.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: mips64/op_iget_object.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: mips64/op_iget_boolean.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: mips64/op_iget_byte.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: mips64/op_iget_char.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: mips64/op_iget_short.S */
-/* File: mips64/op_iget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: mips64/op_iput_wide.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: mips64/op_iput_object.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: mips64/op_iput_boolean.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: mips64/op_iput_byte.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: mips64/op_iput_char.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: mips64/op_iput_short.S */
-/* File: mips64/op_iput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: mips64/op_sget_wide.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: mips64/op_sget_object.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: mips64/op_sget_boolean.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: mips64/op_sget_byte.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: mips64/op_sget_char.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: mips64/op_sget_short.S */
-/* File: mips64/op_sget.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: mips64/op_sput_wide.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: mips64/op_sput_object.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: mips64/op_sput_boolean.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: mips64/op_sput_byte.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: mips64/op_sput_char.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: mips64/op_sput_short.S */
-/* File: mips64/op_sput.S */
-/* File: mips64/field.S */
-TODO
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: mips64/op_invoke_virtual.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeVirtual
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: mips64/op_invoke_super.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeSuper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: mips64/op_invoke_direct.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeDirect
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: mips64/op_invoke_static.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeStatic
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: mips64/op_invoke_interface.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeInterface
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: mips64/op_return_void_no_barrier.S */
-    .extern MterpSuspendCheck
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: mips64/op_invoke_virtual_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeVirtualRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: mips64/op_invoke_super_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeSuperRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: mips64/op_invoke_direct_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeDirectRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: mips64/op_invoke_static_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeStaticRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: mips64/op_invoke_interface_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeInterfaceRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: mips64/op_unused_79.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: mips64/op_unused_7a.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: mips64/op_neg_int.S */
-/* File: mips64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    subu    a0, zero, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: mips64/op_not_int.S */
-/* File: mips64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    nor     a0, zero, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: mips64/op_neg_long.S */
-/* File: mips64/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * For: not-long, neg-long
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    dsubu   a0, zero, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: mips64/op_not_long.S */
-/* File: mips64/unopWide.S */
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * For: not-long, neg-long
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    nor     a0, zero, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: mips64/op_neg_float.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    neg.s   f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: mips64/op_neg_double.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    neg.d   f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: mips64/op_int_to_long.S */
-    /* int-to-long vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: mips64/op_int_to_float.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.s.w f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: mips64/op_int_to_double.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.d.w f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: mips64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: mips64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if 0
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: mips64/op_long_to_float.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.s.l f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: mips64/op_long_to_double.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.d.l f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: mips64/op_float_to_int.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    trunc.w.s f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: mips64/op_float_to_long.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    trunc.l.s f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: mips64/op_float_to_double.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_FLOAT f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.d.s f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: mips64/op_double_to_int.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    trunc.w.d f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: mips64/op_double_to_long.S */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    trunc.l.d f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: mips64/op_double_to_float.S */
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-/* File: mips64/fcvtHeader.S */
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG_DOUBLE f0, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-    cvt.s.d f0, f0
-/* File: mips64/fcvtFooter.S */
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: mips64/op_int_to_byte.S */
-/* File: mips64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    seb     a0, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: mips64/op_int_to_char.S */
-/* File: mips64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    and     a0, a0, 0xffff                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: mips64/op_int_to_short.S */
-/* File: mips64/unop.S */
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-                               # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    seh     a0, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: mips64/op_add_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    addu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: mips64/op_sub_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    subu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: mips64/op_mul_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: mips64/op_div_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    div a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: mips64/op_rem_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: mips64/op_and_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: mips64/op_or_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: mips64/op_xor_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: mips64/op_shl_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    sll a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: mips64/op_shr_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    sra a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: mips64/op_ushr_int.S */
-/* File: mips64/binop.S */
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    srl a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: mips64/op_add_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    daddu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: mips64/op_sub_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dsubu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: mips64/op_mul_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dmul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: mips64/op_div_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    ddiv a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: mips64/op_rem_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dmod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: mips64/op_and_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: mips64/op_or_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: mips64/op_xor_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: mips64/op_shl_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dsll a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: mips64/op_shr_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dsra a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: mips64/op_ushr_long.S */
-/* File: mips64/binopWide.S */
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    dsrl a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4           # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: mips64/op_add_float.S */
-/* File: mips64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    add.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: mips64/op_sub_float.S */
-/* File: mips64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    sub.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: mips64/op_mul_float.S */
-/* File: mips64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    mul.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: mips64/op_div_float.S */
-/* File: mips64/fbinop.S */
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    div.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: mips64/op_rem_float.S */
-    /* rem-float vAA, vBB, vCC */
-    .extern fmodf
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f12, a2              # f12 <- vBB
-    GET_VREG_FLOAT f13, a3              # f13 <- vCC
-    jal     fmodf                       # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: mips64/op_add_double.S */
-/* File: mips64/fbinopWide.S */
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    add.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: mips64/op_sub_double.S */
-/* File: mips64/fbinopWide.S */
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    sub.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: mips64/op_mul_double.S */
-/* File: mips64/fbinopWide.S */
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    mul.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: mips64/op_div_double.S */
-/* File: mips64/fbinopWide.S */
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    div.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: mips64/op_rem_double.S */
-    /* rem-double vAA, vBB, vCC */
-    .extern fmod
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
-    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
-    jal     fmod                        # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: mips64/op_add_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    addu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: mips64/op_sub_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    subu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: mips64/op_mul_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    mul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: mips64/op_div_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    div a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: mips64/op_rem_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    mod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: mips64/op_and_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: mips64/op_or_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: mips64/op_xor_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: mips64/op_shl_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    sll a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: mips64/op_shr_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    sra a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: mips64/op_ushr_int_2addr.S */
-/* File: mips64/binop2addr.S */
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    srl a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: mips64/op_add_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    daddu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: mips64/op_sub_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dsubu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: mips64/op_mul_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dmul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: mips64/op_div_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    ddiv a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: mips64/op_rem_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dmod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: mips64/op_and_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: mips64/op_or_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: mips64/op_xor_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: mips64/op_shl_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dsll a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: mips64/op_shr_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dsra a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips64/op_ushr_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-                               # optional op
-    dsrl a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2           # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: mips64/op_add_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    add.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: mips64/op_sub_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    sub.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: mips64/op_mul_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    mul.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: mips64/op_div_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    div.s f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: mips64/op_rem_float_2addr.S */
-    /* rem-float/2addr vA, vB */
-    .extern fmodf
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f12, a2              # f12 <- vA
-    GET_VREG_FLOAT f13, a3              # f13 <- vB
-    jal     fmodf                       # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: mips64/op_add_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    add.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: mips64/op_sub_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    sub.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: mips64/op_mul_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    mul.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: mips64/op_div_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    div.d f0, f0, f1                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: mips64/op_rem_double_2addr.S */
-    /* rem-double/2addr vA, vB */
-    .extern fmod
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f12, a2             # f12 <- vA
-    GET_VREG_DOUBLE f13, a3             # f13 <- vB
-    jal     fmod                        # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: mips64/op_add_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    addu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: mips64/op_rsub_int.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    subu a0, a1, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: mips64/op_mul_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: mips64/op_div_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    div a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: mips64/op_rem_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: mips64/op_and_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: mips64/op_or_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: mips64/op_xor_int_lit16.S */
-/* File: mips64/binopLit16.S */
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: mips64/op_add_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    addu a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips64/op_rsub_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    subu a0, a1, a0                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: mips64/op_mul_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mul a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: mips64/op_div_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    div a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: mips64/op_rem_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 1
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    mod a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: mips64/op_and_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    and a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: mips64/op_or_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    or a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: mips64/op_xor_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    xor a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: mips64/op_shl_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    sll a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: mips64/op_shr_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    sra a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips64/op_ushr_int_lit8.S */
-/* File: mips64/binopLit8.S */
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if 0
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-                               # optional op
-    srl a0, a0, a1                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: mips64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    lw   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: mips64/op_iget_wide_quick.S */
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a4, 2(rPC)                  # a4 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    daddu   a4, a3, a4                  # create direct pointer
-    lw      a0, 0(a4)
-    lw      a1, 4(a4)
-    dinsu   a0, a1, 32, 32
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG_WIDE a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: mips64/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    .extern artIGetObjectFromMterp
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- object we're operating on
-    jal     artIGetObjectFromMterp      # (obj, offset)
-    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a3, MterpPossibleException  # bail out
-    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: mips64/op_iput_quick.S */
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    sw  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: mips64/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a3, 2(rPC)                  # a3 <- field byte offset
-    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
-    ext     a0, rINST, 8, 4             # a0 <- A
-    beqz    a2, common_errNullObject    # object was null
-    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a2, a3                  # create a direct pointer
-    sw      a0, 0(a1)
-    dsrl32  a0, a0, 0
-    sw      a0, 4(a1)
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: mips64/op_iput_object_quick.S */
-    .extern MterpIputObjectQuick
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpIputObjectQuick
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips64/op_invoke_virtual_quick.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeVirtualQuick
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips64/op_invoke_virtual_range_quick.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeVirtualQuickRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: mips64/op_iput_boolean_quick.S */
-/* File: mips64/op_iput_quick.S */
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    sb  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: mips64/op_iput_byte_quick.S */
-/* File: mips64/op_iput_quick.S */
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    sb  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: mips64/op_iput_char_quick.S */
-/* File: mips64/op_iput_quick.S */
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    sh  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: mips64/op_iput_short_quick.S */
-/* File: mips64/op_iput_quick.S */
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    sh  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: mips64/op_iget_boolean_quick.S */
-/* File: mips64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    lbu   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: mips64/op_iget_byte_quick.S */
-/* File: mips64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    lb   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: mips64/op_iget_char_quick.S */
-/* File: mips64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    lhu   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: mips64/op_iget_short_quick.S */
-/* File: mips64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    lh   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: mips64/op_unused_f3.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: mips64/op_unused_f4.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: mips64/op_unused_f5.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: mips64/op_unused_f6.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: mips64/op_unused_f7.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: mips64/op_unused_f8.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: mips64/op_unused_f9.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: mips64/op_invoke_polymorphic.S */
-/* File: mips64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokePolymorphic
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 4
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips64/op_invoke_polymorphic_range.S */
-/* File: mips64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokePolymorphicRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 4
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: mips64/op_invoke_custom.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeCustom
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: mips64/op_invoke_custom_range.S */
-/* File: mips64/invoke.S */
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     MterpInvokeCustomRange
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: mips64/op_const_method_handle.S */
-/* File: mips64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstMethodHandle                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: mips64/op_const_method_type.S */
-/* File: mips64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstMethodType                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-    .balign 128
-/* File: mips64/instruction_end.S */
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: mips64/instruction_start_sister.S */
-
-    .global artMterpAsmSisterStart
-    .text
-    .balign 4
-artMterpAsmSisterStart:
-
-/* File: mips64/instruction_end_sister.S */
-
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: mips64/instruction_start_alt.S */
-
-    .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (0 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (1 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (2 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (3 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (4 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (5 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (6 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (7 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (8 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (9 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (10 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (11 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (12 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (13 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (14 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (15 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (16 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (17 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (18 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (19 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (20 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (21 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (22 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (23 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (24 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (25 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (26 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (27 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (28 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (29 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (30 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (31 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (32 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (33 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (34 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (35 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (36 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (37 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (38 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (39 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (40 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (41 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (42 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (43 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (44 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (45 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (46 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (47 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (48 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (49 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (50 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (51 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (52 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (53 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (54 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (55 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (56 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (57 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (58 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (59 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (60 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (61 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (62 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (63 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (64 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (65 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (66 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (67 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (68 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (69 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (70 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (71 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (72 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (73 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (74 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (75 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (76 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (77 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (78 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (79 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (80 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (81 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (82 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (83 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (84 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (85 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (86 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (87 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (88 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (89 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (90 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (91 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (92 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (93 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (94 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (95 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (96 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (97 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (98 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (99 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (100 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (101 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (102 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (103 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (104 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (105 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (106 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (107 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (108 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (109 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (110 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (111 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (112 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (113 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (114 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (115 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (116 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (117 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (118 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (119 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (120 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (121 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (122 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (123 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (124 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (125 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (126 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (127 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (128 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (129 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (130 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (131 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (132 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (133 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (134 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (135 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (136 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (137 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (138 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (139 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (140 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (141 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (142 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (143 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (144 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (145 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (146 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (147 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (148 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (149 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (150 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (151 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (152 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (153 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (154 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (155 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (156 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (157 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (158 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (159 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (160 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (161 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (162 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (163 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (164 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (165 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (166 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (167 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (168 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (169 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (170 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (171 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (172 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (173 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (174 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (175 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (176 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (177 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (178 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (179 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (180 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (181 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (182 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (183 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (184 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (185 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (186 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (187 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (188 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (189 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (190 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (191 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (192 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (193 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (194 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (195 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (196 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (197 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (198 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (199 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (200 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (201 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (202 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (203 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (204 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (205 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (206 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (207 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (208 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (209 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (210 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (211 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (212 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (213 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (214 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (215 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (216 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (217 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (218 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (219 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (220 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (221 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (222 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (223 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (224 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (225 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (226 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (227 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (228 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (229 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (230 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (231 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (232 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (233 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (234 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (235 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (236 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (237 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (238 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (239 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (240 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (241 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (242 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (243 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (244 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (245 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (246 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (247 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (248 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (249 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (250 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (251 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (252 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (253 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (254 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Note that the call to MterpCheckBefore is done as a tail call.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    dla     ra, artMterpAsmInstructionStart
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    daddu   ra, ra, (255 * 128)            # Addr of primary handler.
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-    .balign 128
-/* File: mips64/instruction_end_alt.S */
-
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: mips64/footer.S */
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
-    .extern MterpLogDivideByZeroException
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogDivideByZeroException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogArrayIndexException
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogArrayIndexException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogNullObjectException
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogNullObjectException
-#endif
-    b       MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-    .extern MterpHandleException
-    .extern MterpShouldSwitchInterpreters
-MterpException:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpHandleException                    # (self, shadow_frame)
-    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
-    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lwu     a1, OFF_FP_DEX_PC(rFP)
-    REFRESH_IBASE
-    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-    li      v0, JIT_CHECK_OSR
-    beqc    rPROFILE, v0, .L_osr_check
-    bltc    rPROFILE, v0, .L_resume_backward_branch
-    dsubu   rPROFILE, 1
-    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnezc   ra, .L_suspend_request_pending
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    move    a0, rSELF
-    jal     MterpSuspendCheck           # (self)
-    bnezc   v0, MterpFallback
-    REFRESH_IBASE                       # might have changed during suspend
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_no_count_backwards:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bnec    rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beqc    rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    ld      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2 
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST                               # rINST contains offset
-    jal     MterpLogOSR
-#endif
-    li      v0, 1                                   # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-    .extern MterpLogFallback
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogFallback
-#endif
-MterpCommonFallback:
-    li      v0, 0                                   # signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA.  Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                                   # signal return to caller.
-    b       MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
-    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sd      a0, 0(a2)
-    li      v0, 1                                   # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-    ld      s6, STACK_OFFSET_S6(sp)
-    .cfi_restore 22
-    ld      s5, STACK_OFFSET_S5(sp)
-    .cfi_restore 21
-    ld      s4, STACK_OFFSET_S4(sp)
-    .cfi_restore 20
-    ld      s3, STACK_OFFSET_S3(sp)
-    .cfi_restore 19
-    ld      s2, STACK_OFFSET_S2(sp)
-    .cfi_restore 18
-    ld      s1, STACK_OFFSET_S1(sp)
-    .cfi_restore 17
-    ld      s0, STACK_OFFSET_S0(sp)
-    .cfi_restore 16
-
-    ld      ra, STACK_OFFSET_RA(sp)
-    .cfi_restore 31
-
-    ld      t8, STACK_OFFSET_GP(sp)
-    .cpreturn
-    .cfi_restore 28
-
-    .set    noreorder
-    jr      ra
-    daddu   sp, sp, STACK_SIZE
-    .cfi_adjust_cfa_offset -STACK_SIZE
-
-    .cfi_endproc
-    .set    reorder
-    .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
deleted file mode 100644
index 32811ff..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ /dev/null
@@ -1,12855 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'x86'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: x86/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
-   eax, edx, ecx, st(0)-st(7)
-Callee save set:
-   ebx, esi, edi, ebp
-Return regs:
-   32-bit in eax
-   64-bit in edx:eax (low-order 32 in eax)
-   fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left.  On entry to target, first
-parm is at 4(%esp).  Traditional entry code is:
-
-functEntry:
-    push    %ebp             # save old frame pointer
-    mov     %ebp,%esp        # establish new frame pointer
-    sub     FrameSize,%esp   # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
-  nick     reg   purpose
-  rPC      esi   interpreted program counter, used for fetching instructions
-  rFP      edi   interpreted frame pointer, used for accessing locals and args
-  rINSTw   bx    first 16-bit code of current instruction
-  rINSTbl  bl    opcode portion of instruction word
-  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
-  rIBASE   edx   base of instruction handler table
-  rREFS    ebp   base of object references in shadow frame.
-
-Notes:
-   o High order 16 bits of ebx must be zero on entry to handler
-   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
-    #define MACRO_LITERAL(value) $(value)
-    #define FUNCTION_TYPE(name)
-    #define OBJECT_TYPE(name)
-    #define SIZE(start,end)
-    // Mac OS' symbols have an _ prefix.
-    #define SYMBOL(name) _ ## name
-    #define ASM_HIDDEN .private_extern
-#else
-    #define MACRO_LITERAL(value) $value
-    #define FUNCTION_TYPE(name) .type name, @function
-    #define OBJECT_TYPE(name) .type name, @object
-    #define SIZE(start,end) .size start, .-end
-    #define SYMBOL(name) name
-    #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
-    pushl \_reg
-    .cfi_adjust_cfa_offset 4
-    .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
-    popl \_reg
-    .cfi_adjust_cfa_offset -4
-    .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address + 4 * 4 for spills
- */
-#define FRAME_SIZE     28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        (FRAME_SIZE + 16 + 16)
-#define IN_ARG2        (FRAME_SIZE + 16 + 12)
-#define IN_ARG1        (FRAME_SIZE + 16 +  8)
-#define IN_ARG0        (FRAME_SIZE + 16 +  4)
-/* Spill offsets relative to %esp */
-#define LOCAL0         (FRAME_SIZE -  4)
-#define LOCAL1         (FRAME_SIZE -  8)
-#define LOCAL2         (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3       ( 12)
-#define OUT_ARG2       (  8)
-#define OUT_ARG1       (  4)
-#define OUT_ARG0       (  0)  /* <- ExecuteMterpImpl esp + 0 */
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF    IN_ARG0(%esp)
-#define rPC      %esi
-#define CFI_DEX  6  // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP  0  // DWARF register number of the first argument register (eax).
-#define rFP      %edi
-#define rINST    %ebx
-#define rINSTw   %bx
-#define rINSTbh  %bh
-#define rINSTbl  %bl
-#define rIBASE   %edx
-#define rREFS    %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    movl    rSELF, rIBASE
-    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
-    movl    rSELF, rIBASE
-    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
-    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
-    movb    rINSTbl, rINSTbh
-    movb    MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
- */
-.macro FETCH_INST
-    movzwl  (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
-    movzx   rINSTbl,%eax
-    movzbl  rINSTbh,rINST
-    shll    MACRO_LITERAL(7), %eax
-    addl    rIBASE, %eax
-    jmp     *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
-    leal    2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
-    ADVANCE_PC \_count
-    FETCH_INST
-    GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
-    movl    (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
-    movq    (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
-    movq    \_reg, (rFP,\_vreg,4)
-    pxor    \_reg, \_reg
-    movq    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
-    movl    4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
-    movl    \_reg, 4(rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-/* File: x86/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
-    .global SYMBOL(ExecuteMterpImpl)
-    FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- *  0  Thread* self
- *  1  insns_
- *  2  ShadowFrame
- *  3  JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
-    .cfi_startproc
-    .cfi_def_cfa esp, 4
-
-    /* Spill callee save regs */
-    PUSH    %ebp
-    PUSH    %edi
-    PUSH    %esi
-    PUSH    %ebx
-
-    /* Allocate frame */
-    subl    $FRAME_SIZE, %esp
-    .cfi_adjust_cfa_offset FRAME_SIZE
-
-    /* Load ShadowFrame pointer */
-    movl    IN_ARG2(%esp), %edx
-
-    /* Remember the return register */
-    movl    IN_ARG3(%esp), %eax
-    movl    %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
-    /* Remember the code_item */
-    movl    IN_ARG1(%esp), %ecx
-    movl    %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
-    /* set up "named" registers */
-    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
-    leal    SHADOWFRAME_VREGS_OFFSET(%edx), rFP
-    leal    (rFP, %eax, 4), rREFS
-    movl    SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
-    lea     (%ecx, %eax, 2), rPC
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Set up for backwards branches & osr profiling */
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpSetUpHotnessCountdown)
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/* File: x86/instruction_start.S */
-
-    OBJECT_TYPE(artMterpAsmInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
-    .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: x86/op_nop.S */
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: x86/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST, rINST
-    .if 0
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: x86/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzx   rINSTbl, %eax                   # eax <- AA
-    movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
-    .if 0
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: x86/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwl  4(rPC), %ecx                    # ecx <- BBBB
-    movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST, %ecx
-    .if 0
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: x86/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %ecx            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: x86/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  2(rPC), %ecx                    # ecx <- BBBB
-    movzbl  rINSTbl, %eax                   # eax <- AAAA
-    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: x86/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  4(rPC), %ecx                    # ecx<- BBBB
-    movzwl  2(rPC), %eax                    # eax<- AAAA
-    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: x86/op_move_object.S */
-/* File: x86/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST, rINST
-    .if 1
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: x86/op_move_object_from16.S */
-/* File: x86/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzx   rINSTbl, %eax                   # eax <- AA
-    movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
-    .if 1
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: x86/op_move_object_16.S */
-/* File: x86/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwl  4(rPC), %ecx                    # ecx <- BBBB
-    movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST, %ecx
-    .if 1
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: x86/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
-    movl    (%eax), %eax                    # r0 <- result.i.
-    .if 0
-    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: x86/op_move_result_wide.S */
-    /* move-result-wide vAA */
-    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
-    movl    4(%eax), %ecx                   # Get high
-    movl    (%eax), %eax                    # Get low
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[AA+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: x86/op_move_result_object.S */
-/* File: x86/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
-    movl    (%eax), %eax                    # r0 <- result.i.
-    .if 1
-    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: x86/op_move_exception.S */
-    /* move-exception vAA */
-    movl    rSELF, %ecx
-    movl    THREAD_EXCEPTION_OFFSET(%ecx), %eax
-    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- exception object
-    movl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: x86/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorl    %eax, %eax
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: x86/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINST                    # eax <- vAA
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: x86/op_return_wide.S */
-/*
- * Return a 64-bit value.
- */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    GET_VREG_HIGH %ecx, rINST               # ecx <- v[AA+1]
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: x86/op_return_object.S */
-/* File: x86/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINST                    # eax <- vAA
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: x86/op_const_4.S */
-    /* const/4 vA, #+B */
-    movsx   rINSTbl, %eax                   # eax <-ssssssBx
-    movl    $0xf, rINST
-    andl    %eax, rINST                     # rINST <- A
-    sarl    $4, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: x86/op_const_16.S */
-    /* const/16 vAA, #+BBBB */
-    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx, rINST                    # vAA <- ssssBBBB
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: x86/op_const.S */
-    /* const vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax, rINST                    # vAA<- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: x86/op_const_high16.S */
-    /* const/high16 vAA, #+BBBB0000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax, rINST                    # vAA <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: x86/op_const_wide_16.S */
-    /* const-wide/16 vAA, #+BBBB */
-    movswl  2(rPC), %eax                    # eax <- ssssBBBB
-    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
-    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE, rINST             # store msw
-    SET_VREG %eax, rINST                    # store lsw
-    movl    %ecx, rIBASE                    # restore rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: x86/op_const_wide_32.S */
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # eax <- BBBBbbbb
-    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
-    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE, rINST             # store msw
-    SET_VREG %eax, rINST                    # store lsw
-    movl    %ecx, rIBASE                    # restore rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: x86/op_const_wide.S */
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    movl    2(rPC), %eax                    # eax <- lsw
-    movzbl  rINSTbl, %ecx                   # ecx <- AA
-    movl    6(rPC), rINST                   # rINST <- msw
-    SET_VREG %eax, %ecx
-    SET_VREG_HIGH  rINST, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: x86/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $16, %eax                      # eax <- BBBB0000
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    xorl    %eax, %eax
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: x86/op_const_string.S */
-/* File: x86/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstString)                 # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: x86/op_const_string_jumbo.S */
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: x86/op_const_class.S */
-/* File: x86/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstClass)                 # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: x86/op_monitor_enter.S */
-/*
- * Synchronize on an object.
- */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    GET_VREG %ecx, rINST
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG1(%esp)
-    call    SYMBOL(artLockObjectFromCode)   # (object, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: x86/op_monitor_exit.S */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction.  See the Dalvik
- * instruction spec.
- */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    GET_VREG %ecx, rINST
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG1(%esp)
-    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: x86/op_check_cast.S */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    leal    VREG_ADDRESS(rINST), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    OFF_FP_METHOD(rFP),%eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: x86/op_instance_of.S */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $4, %eax                       # eax <- B
-    leal    VREG_ADDRESS(%eax), %ecx        # Get object address
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    OFF_FP_METHOD(rFP),%eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException
-    andb    $0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: x86/op_array_length.S */
-/*
- * Return the length of an array.
- */
-    mov     rINST, %eax                     # eax <- BA
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %ecx, rINST                    # ecx <- vB (object ref)
-    testl   %ecx, %ecx                      # is null?
-    je      common_errNullObject
-    andb    $0xf, %al                      # eax <- A
-    movl    MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
-    SET_VREG rINST, %eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: x86/op_new_instance.S */
-/*
- * Create a new instance of a class.
- */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    REFRESH_INST 34
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpNewInstance)
-    RESTORE_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: x86/op_new_array.S */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST 35
-    movl    rINST, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpNewArray)
-    RESTORE_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: x86/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG2(%esp)
-    call    SYMBOL(MterpFilledNewArray)
-    REFRESH_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: x86/op_filled_new_array_range.S */
-/* File: x86/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG2(%esp)
-    call    SYMBOL(MterpFilledNewArrayRange)
-    REFRESH_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: x86/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    GET_VREG %eax, rINST                    # eax <- vAA (array object)
-    movl    %eax, OUT_ARG0(%esp)
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
-    REFRESH_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: x86/op_throw.S */
-/*
- * Throw an exception object in the current thread.
- */
-    /* throw vAA */
-    EXPORT_PC
-    GET_VREG %eax, rINST                    # eax<- vAA (exception object)
-    testl   %eax, %eax
-    jz      common_errNullObject
-    movl    rSELF,%ecx
-    movl    %eax, THREAD_EXCEPTION_OFFSET(%ecx)
-    jmp     MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: x86/op_goto.S */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto +AA */
-    movsbl  rINSTbl, rINST                  # rINST <- ssssssAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: x86/op_goto_16.S */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto/16 +AAAA */
-    movswl  2(rPC), rINST                   # rINST <- ssssAAAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: x86/op_goto_32.S */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".  Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
-    /* goto/32 +AAAAAAAA */
-    movl    2(rPC), rINST                   # rINST <- AAAAAAAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: x86/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax, rINST                    # eax <- vAA
-    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
-    movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    SYMBOL(MterpDoPackedSwitch)
-    REFRESH_IBASE
-    testl   %eax, %eax
-    movl    %eax, rINST
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: x86/op_sparse_switch.S */
-/* File: x86/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax, rINST                    # eax <- vAA
-    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
-    movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    SYMBOL(MterpDoSparseSwitch)
-    REFRESH_IBASE
-    testl   %eax, %eax
-    movl    %eax, rINST
-    jmp     MterpCommonTakenBranch
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: x86/op_cmpl_float.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx<- CC
-    movzbl  2(rPC), %eax                    # eax<- BB
-    movss VREG_ADDRESS(%eax), %xmm0
-    xor     %eax, %eax
-    ucomiss VREG_ADDRESS(%ecx), %xmm0
-    jp      .Lop_cmpl_float_nan_is_neg
-    je      .Lop_cmpl_float_finish
-    jb      .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
-    incl    %eax
-    jmp     .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
-    decl    %eax
-.Lop_cmpl_float_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: x86/op_cmpg_float.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx<- CC
-    movzbl  2(rPC), %eax                    # eax<- BB
-    movss VREG_ADDRESS(%eax), %xmm0
-    xor     %eax, %eax
-    ucomiss VREG_ADDRESS(%ecx), %xmm0
-    jp      .Lop_cmpg_float_nan_is_pos
-    je      .Lop_cmpg_float_finish
-    jb      .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
-    incl    %eax
-    jmp     .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
-    decl    %eax
-.Lop_cmpg_float_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: x86/op_cmpl_double.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx<- CC
-    movzbl  2(rPC), %eax                    # eax<- BB
-    movsd VREG_ADDRESS(%eax), %xmm0
-    xor     %eax, %eax
-    ucomisd VREG_ADDRESS(%ecx), %xmm0
-    jp      .Lop_cmpl_double_nan_is_neg
-    je      .Lop_cmpl_double_finish
-    jb      .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
-    incl    %eax
-    jmp     .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
-    decl    %eax
-.Lop_cmpl_double_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: x86/op_cmpg_double.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx<- CC
-    movzbl  2(rPC), %eax                    # eax<- BB
-    movsd VREG_ADDRESS(%eax), %xmm0
-    xor     %eax, %eax
-    ucomisd VREG_ADDRESS(%ecx), %xmm0
-    jp      .Lop_cmpg_double_nan_is_pos
-    je      .Lop_cmpg_double_finish
-    jb      .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
-    incl    %eax
-    jmp     .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
-    decl    %eax
-.Lop_cmpg_double_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: x86/op_cmp_long.S */
-/*
- * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
-    /* cmp-long vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
-    cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
-    jl      .Lop_cmp_long_smaller
-    jg      .Lop_cmp_long_bigger
-    movzbl  2(rPC), %eax                    # eax <- BB, restore BB
-    GET_VREG %eax, %eax                     # eax <- v[BB]
-    sub     VREG_ADDRESS(%ecx), %eax
-    ja      .Lop_cmp_long_bigger
-    jb      .Lop_cmp_long_smaller
-.Lop_cmp_long_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_cmp_long_bigger:
-    movl    $1, %eax
-    jmp     .Lop_cmp_long_finish
-
-.Lop_cmp_long_smaller:
-    movl    $-1, %eax
-    jmp     .Lop_cmp_long_finish
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: x86/op_if_eq.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    jne   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: x86/op_if_ne.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    je   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: x86/op_if_lt.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    jge   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: x86/op_if_ge.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    jl   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: x86/op_if_gt.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    jle   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: x86/op_if_le.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    jg   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: x86/op_if_eqz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    jne   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: x86/op_if_nez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    je   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: x86/op_if_ltz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    jge   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: x86/op_if_gez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    jl   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: x86/op_if_gtz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    jle   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: x86/op_if_lez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    jg   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: x86/op_unused_3e.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: x86/op_unused_3f.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: x86/op_unused_40.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: x86/op_unused_41.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: x86/op_unused_42.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: x86/op_unused_43.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    movl   MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: x86/op_aget_wide.S */
-/*
- * Array get, 64 bits.  vAA <- vBB[vCC].
- */
-    /* aget-wide vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    movq    (%eax), %xmm0                   # xmm0 <- vBB[vCC]
-    SET_WIDE_FP_VREG %xmm0, rINST           # vAA <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: x86/op_aget_object.S */
-/*
- * Array object get.  vAA <- vBB[vCC].
- *
- * for: aget-object
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecs <- vCC (requested index)
-    EXPORT_PC
-    movl    %eax, OUT_ARG0(%esp)
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException
-    SET_VREG_OBJECT %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: x86/op_aget_boolean.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    movzbl   MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: x86/op_aget_byte.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    movsbl   MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: x86/op_aget_char.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    movzwl   MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: x86/op_aget_short.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    movswl   MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
-    GET_VREG rINST, rINST
-    movl  rINST, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: x86/op_aput_wide.S */
-/*
- * Array put, 64 bits.  vBB[vCC] <- vAA.
- *
- */
-    /* aput-wide vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- vAA
-    movq    %xmm0, (%eax)                   # vBB[vCC] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: x86/op_aput_object.S */
-/*
- * Store an object into an array.  vBB[vCC] <- vAA.
- */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST 77
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAputObject)         # (array, index)
-    RESTORE_IBASE
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: x86/op_aput_boolean.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    GET_VREG rINST, rINST
-    movb  rINSTbl, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: x86/op_aput_byte.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    GET_VREG rINST, rINST
-    movb  rINSTbl, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: x86/op_aput_char.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    GET_VREG rINST, rINST
-    movw  rINSTw, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: x86/op_aput_short.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    GET_VREG rINST, rINST
-    movw  rINSTw, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU32
-    REFRESH_INST 82                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: x86/op_iget_wide.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU64
-    REFRESH_INST 83                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: x86/op_iget_object.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetObj
-    REFRESH_INST 84                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: x86/op_iget_boolean.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU8
-    REFRESH_INST 85                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: x86/op_iget_byte.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI8
-    REFRESH_INST 86                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: x86/op_iget_char.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU16
-    REFRESH_INST 87                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: x86/op_iget_short.S */
-/* File: x86/op_iget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI16
-    REFRESH_INST 88                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIGetI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU32
-    REFRESH_INST 89                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: x86/op_iput_wide.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU64
-    REFRESH_INST 90                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: x86/op_iput_object.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutObj
-    REFRESH_INST 91                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: x86/op_iput_boolean.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU8
-    REFRESH_INST 92                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: x86/op_iput_byte.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI8
-    REFRESH_INST 93                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: x86/op_iput_char.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU16
-    REFRESH_INST 94                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: x86/op_iput_short.S */
-/* File: x86/op_iput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI16
-    REFRESH_INST 95                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpIPutI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU32
-    REFRESH_INST 96                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: x86/op_sget_wide.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU64
-    REFRESH_INST 97                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: x86/op_sget_object.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetObj
-    REFRESH_INST 98                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: x86/op_sget_boolean.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU8
-    REFRESH_INST 99                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: x86/op_sget_byte.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI8
-    REFRESH_INST 100                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: x86/op_sget_char.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU16
-    REFRESH_INST 101                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: x86/op_sget_short.S */
-/* File: x86/op_sget.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI16
-    REFRESH_INST 102                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSGetI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU32
-    REFRESH_INST 103                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: x86/op_sput_wide.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU64
-    REFRESH_INST 104                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: x86/op_sput_object.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutObj
-    REFRESH_INST 105                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: x86/op_sput_boolean.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU8
-    REFRESH_INST 106                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: x86/op_sput_byte.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI8
-    REFRESH_INST 107                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: x86/op_sput_char.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU16
-    REFRESH_INST 108                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: x86/op_sput_short.S */
-/* File: x86/op_sput.S */
-/* File: x86/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI16
-    REFRESH_INST 109                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL(MterpSPutI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: x86/op_invoke_virtual.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 110
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeVirtual)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: x86/op_invoke_super.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 111
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeSuper)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: x86/op_invoke_direct.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 112
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeDirect)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: x86/op_invoke_static.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 113
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeStatic)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: x86/op_invoke_interface.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 114
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeInterface)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: x86/op_return_void_no_barrier.S */
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorl    %eax, %eax
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: x86/op_invoke_virtual_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 116
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeVirtualRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: x86/op_invoke_super_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 117
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeSuperRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: x86/op_invoke_direct_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 118
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeDirectRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: x86/op_invoke_static_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 119
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeStaticRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: x86/op_invoke_interface_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 120
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeInterfaceRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: x86/op_unused_79.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: x86/op_unused_7a.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: x86/op_neg_int.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf,%cl                       # ecx <- A
-    negl    %eax
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: x86/op_not_int.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf,%cl                       # ecx <- A
-    notl %eax
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: x86/op_neg_long.S */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, %ecx                     # eax <- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
-    negl    %eax
-    adcl    $0, %ecx
-    negl    %ecx
-    SET_VREG %eax, rINST                    # v[A+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: x86/op_not_long.S */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, %ecx                     # eax <- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
-    notl    %eax
-    notl    %ecx
-    SET_VREG %eax, rINST                    # v[A+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: x86/op_neg_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    flds   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    fchs
-    fstps  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 0
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: x86/op_neg_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fldl   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    fchs
-    fstpl  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 1
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: x86/op_int_to_long.S */
-    /* int to long vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- +A
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
-    cltd                                    # rINST:eax<- sssssssBBBBBBBB
-    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
-    SET_VREG %eax, rINST                    # v[A+0] <- %eax
-    movl    %ecx, rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: x86/op_int_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fildl   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstps  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 0
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: x86/op_int_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fildl   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstpl  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 1
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: x86/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: x86/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST, rINST
-    .if 0
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: x86/op_long_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fildll   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstps  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 0
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: x86/op_long_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fildll   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstpl  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 1
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: x86/op_float_to_int.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    .if 0
-    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .else
-    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .endif
-    ftst
-    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
-    movzwl  LOCAL0(%esp), %eax
-    movb    $0xc, %ah
-    movw    %ax, LOCAL0+2(%esp)
-    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
-    andb    $0xf, %cl                      # ecx <- A
-    .if 0
-    fistpll VREG_ADDRESS(%ecx)              # convert and store
-    .else
-    fistpl  VREG_ADDRESS(%ecx)              # convert and store
-    .endif
-    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
-    .if 0
-    movl    $0x80000000, %eax
-    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
-    orl     VREG_ADDRESS(%ecx), %eax
-    .else
-    cmpl    $0x80000000, VREG_ADDRESS(%ecx)
-    .endif
-    je      .Lop_float_to_int_special_case # fix up result
-
-.Lop_float_to_int_finish:
-    xor     %eax, %eax
-    mov     %eax, VREG_REF_ADDRESS(%ecx)
-    .if 0
-    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_int_special_case:
-    fnstsw  %ax
-    sahf
-    jp      .Lop_float_to_int_isNaN
-    adcl    $-1, VREG_ADDRESS(%ecx)
-    .if 0
-    adcl    $-1, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-   jmp      .Lop_float_to_int_finish
-.Lop_float_to_int_isNaN:
-    movl    $0, VREG_ADDRESS(%ecx)
-    .if 0
-    movl    $0, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-    jmp     .Lop_float_to_int_finish
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: x86/op_float_to_long.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    .if 0
-    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .else
-    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .endif
-    ftst
-    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
-    movzwl  LOCAL0(%esp), %eax
-    movb    $0xc, %ah
-    movw    %ax, LOCAL0+2(%esp)
-    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
-    andb    $0xf, %cl                      # ecx <- A
-    .if 1
-    fistpll VREG_ADDRESS(%ecx)              # convert and store
-    .else
-    fistpl  VREG_ADDRESS(%ecx)              # convert and store
-    .endif
-    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
-    .if 1
-    movl    $0x80000000, %eax
-    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
-    orl     VREG_ADDRESS(%ecx), %eax
-    .else
-    cmpl    $0x80000000, VREG_ADDRESS(%ecx)
-    .endif
-    je      .Lop_float_to_long_special_case # fix up result
-
-.Lop_float_to_long_finish:
-    xor     %eax, %eax
-    mov     %eax, VREG_REF_ADDRESS(%ecx)
-    .if 1
-    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_long_special_case:
-    fnstsw  %ax
-    sahf
-    jp      .Lop_float_to_long_isNaN
-    adcl    $-1, VREG_ADDRESS(%ecx)
-    .if 1
-    adcl    $-1, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-   jmp      .Lop_float_to_long_finish
-.Lop_float_to_long_isNaN:
-    movl    $0, VREG_ADDRESS(%ecx)
-    .if 1
-    movl    $0, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-    jmp     .Lop_float_to_long_finish
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: x86/op_float_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    flds   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstpl  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 1
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: x86/op_double_to_int.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    .if 1
-    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .else
-    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .endif
-    ftst
-    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
-    movzwl  LOCAL0(%esp), %eax
-    movb    $0xc, %ah
-    movw    %ax, LOCAL0+2(%esp)
-    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
-    andb    $0xf, %cl                      # ecx <- A
-    .if 0
-    fistpll VREG_ADDRESS(%ecx)              # convert and store
-    .else
-    fistpl  VREG_ADDRESS(%ecx)              # convert and store
-    .endif
-    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
-    .if 0
-    movl    $0x80000000, %eax
-    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
-    orl     VREG_ADDRESS(%ecx), %eax
-    .else
-    cmpl    $0x80000000, VREG_ADDRESS(%ecx)
-    .endif
-    je      .Lop_double_to_int_special_case # fix up result
-
-.Lop_double_to_int_finish:
-    xor     %eax, %eax
-    mov     %eax, VREG_REF_ADDRESS(%ecx)
-    .if 0
-    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_int_special_case:
-    fnstsw  %ax
-    sahf
-    jp      .Lop_double_to_int_isNaN
-    adcl    $-1, VREG_ADDRESS(%ecx)
-    .if 0
-    adcl    $-1, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-   jmp      .Lop_double_to_int_finish
-.Lop_double_to_int_isNaN:
-    movl    $0, VREG_ADDRESS(%ecx)
-    .if 0
-    movl    $0, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-    jmp     .Lop_double_to_int_finish
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: x86/op_double_to_long.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    .if 1
-    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .else
-    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .endif
-    ftst
-    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
-    movzwl  LOCAL0(%esp), %eax
-    movb    $0xc, %ah
-    movw    %ax, LOCAL0+2(%esp)
-    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
-    andb    $0xf, %cl                      # ecx <- A
-    .if 1
-    fistpll VREG_ADDRESS(%ecx)              # convert and store
-    .else
-    fistpl  VREG_ADDRESS(%ecx)              # convert and store
-    .endif
-    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
-    .if 1
-    movl    $0x80000000, %eax
-    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
-    orl     VREG_ADDRESS(%ecx), %eax
-    .else
-    cmpl    $0x80000000, VREG_ADDRESS(%ecx)
-    .endif
-    je      .Lop_double_to_long_special_case # fix up result
-
-.Lop_double_to_long_finish:
-    xor     %eax, %eax
-    mov     %eax, VREG_REF_ADDRESS(%ecx)
-    .if 1
-    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_long_special_case:
-    fnstsw  %ax
-    sahf
-    jp      .Lop_double_to_long_isNaN
-    adcl    $-1, VREG_ADDRESS(%ecx)
-    .if 1
-    adcl    $-1, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-   jmp      .Lop_double_to_long_finish
-.Lop_double_to_long_isNaN:
-    movl    $0, VREG_ADDRESS(%ecx)
-    .if 1
-    movl    $0, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-    jmp     .Lop_double_to_long_finish
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: x86/op_double_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fldl   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    
-    fstps  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if 0
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: x86/op_int_to_byte.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf,%cl                       # ecx <- A
-    movsbl  %al, %eax
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: x86/op_int_to_char.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf,%cl                       # ecx <- A
-    movzwl  %ax,%eax
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: x86/op_int_to_short.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf,%cl                       # ecx <- A
-    movswl %ax, %eax
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: x86/op_add_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    addl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: x86/op_sub_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    subl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: x86/op_mul_int.S */
-    /*
-     * 32-bit binary multiplication.
-     */
-    /* mul vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    mov     rIBASE, LOCAL0(%esp)
-    imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
-    mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: x86/op_div_int.S */
-/* File: x86/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    mov     rIBASE, LOCAL0(%esp)
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    movl    %eax, %edx
-    orl     %ecx, %edx
-    testl   $0xFFFFFF00, %edx              # If both arguments are less
-                                            #   than 8-bit and +ve
-    jz      .Lop_div_int_8                   # Do 8-bit divide
-    testl   $0xFFFF0000, %edx              # If both arguments are less
-                                            #   than 16-bit and +ve
-    jz      .Lop_div_int_16                  # Do 16-bit divide
-    cmpl    $-1, %ecx
-    jne     .Lop_div_int_32
-    cmpl    $0x80000000, %eax
-    jne     .Lop_div_int_32
-    movl    $0x80000000, %eax
-    jmp     .Lop_div_int_finish
-.Lop_div_int_32:
-    cltd
-    idivl   %ecx
-    jmp     .Lop_div_int_finish
-.Lop_div_int_8:
-    div     %cl                             # 8-bit divide otherwise.
-                                            # Remainder in %ah, quotient in %al
-    .if 0
-    movl    %eax, %edx
-    shr     $8, %edx
-    .else
-    andl    $0x000000FF, %eax
-    .endif
-    jmp     .Lop_div_int_finish
-.Lop_div_int_16:
-    xorl    %edx, %edx                      # Clear %edx before divide
-    div     %cx
-.Lop_div_int_finish:
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: x86/op_rem_int.S */
-/* File: x86/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    mov     rIBASE, LOCAL0(%esp)
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    movl    %eax, %edx
-    orl     %ecx, %edx
-    testl   $0xFFFFFF00, %edx              # If both arguments are less
-                                            #   than 8-bit and +ve
-    jz      .Lop_rem_int_8                   # Do 8-bit divide
-    testl   $0xFFFF0000, %edx              # If both arguments are less
-                                            #   than 16-bit and +ve
-    jz      .Lop_rem_int_16                  # Do 16-bit divide
-    cmpl    $-1, %ecx
-    jne     .Lop_rem_int_32
-    cmpl    $0x80000000, %eax
-    jne     .Lop_rem_int_32
-    movl    $0, rIBASE
-    jmp     .Lop_rem_int_finish
-.Lop_rem_int_32:
-    cltd
-    idivl   %ecx
-    jmp     .Lop_rem_int_finish
-.Lop_rem_int_8:
-    div     %cl                             # 8-bit divide otherwise.
-                                            # Remainder in %ah, quotient in %al
-    .if 1
-    movl    %eax, %edx
-    shr     $8, %edx
-    .else
-    andl    $0x000000FF, %eax
-    .endif
-    jmp     .Lop_rem_int_finish
-.Lop_rem_int_16:
-    xorl    %edx, %edx                      # Clear %edx before divide
-    div     %cx
-.Lop_rem_int_finish:
-    SET_VREG rIBASE, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: x86/op_and_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    andl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: x86/op_or_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    orl     (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: x86/op_xor_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    xorl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: x86/op_shl_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    sall    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: x86/op_shr_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    sarl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: x86/op_ushr_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    shrl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: x86/op_add_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    addl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    adcl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: x86/op_sub_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    subl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    sbbl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: x86/op_mul_long.S */
-/*
- * Signed 64-bit integer multiply.
- *
- * We could definately use more free registers for
- * this code.   We spill rINSTw (ebx),
- * giving us eax, ebc, ecx and edx as computational
- * temps.  On top of that, we'll spill edi (rFP)
- * for use as the vB pointer and esi (rPC) for use
- * as the vC pointer.  Yuck.
- *
- */
-    /* mul-long vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- B
-    movzbl  3(rPC), %ecx                    # ecx <- C
-    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
-    mov     rFP, LOCAL1(%esp)               # save FP
-    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
-    leal    (rFP,%eax,4), %esi              # esi <- &v[B]
-    leal    (rFP,%ecx,4), rFP               # rFP <- &v[C]
-    movl    4(%esi), %ecx                   # ecx <- Bmsw
-    imull   (rFP), %ecx                     # ecx <- (Bmsw*Clsw)
-    movl    4(rFP), %eax                    # eax <- Cmsw
-    imull   (%esi), %eax                    # eax <- (Cmsw*Blsw)
-    addl    %eax, %ecx                      # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
-    movl    (rFP), %eax                     # eax <- Clsw
-    mull    (%esi)                          # eax <- (Clsw*Alsw)
-    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
-    mov     LOCAL1(%esp), rFP               # restore FP
-    leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
-    SET_VREG_HIGH rIBASE, rINST             # v[B+1] <- rIBASE
-    mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    SET_VREG %eax, rINST                    # v[B] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: x86/op_div_long.S */
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div vAA, vBB, vCC */
-    .extern art_quick_ldiv
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %ecx, %edx
-    orl     %ebx, %ecx
-    jz      common_errDivideByZero
-    movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx, %eax
-    GET_VREG %eax, %eax
-    call    SYMBOL(art_quick_ldiv)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: x86/op_rem_long.S */
-/* File: x86/op_div_long.S */
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div vAA, vBB, vCC */
-    .extern art_quick_lmod
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %ecx, %edx
-    orl     %ebx, %ecx
-    jz      common_errDivideByZero
-    movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx, %eax
-    GET_VREG %eax, %eax
-    call    SYMBOL(art_quick_lmod)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: x86/op_and_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    andl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    andl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: x86/op_or_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    orl     (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    orl     4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: x86/op_xor_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    xorl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    xorl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: x86/op_shl_long.S */
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shl-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rINST */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # ecx <- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shldl   %eax,rIBASE
-    sall    %cl, %eax
-    testb   $32, %cl
-    je      2f
-    movl    %eax, rIBASE
-    xorl    %eax, %eax
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: x86/op_shr_long.S */
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shrdl   rIBASE, %eax
-    sarl    %cl, rIBASE
-    testb   $32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    sarl    $31, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: x86/op_ushr_long.S */
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shrdl   rIBASE, %eax
-    shrl    %cl, rIBASE
-    testb   $32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    xorl    rIBASE, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[BB+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: x86/op_add_float.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movss   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    addss VREG_ADDRESS(%eax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: x86/op_sub_float.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movss   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    subss VREG_ADDRESS(%eax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: x86/op_mul_float.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movss   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    mulss VREG_ADDRESS(%eax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: x86/op_div_float.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movss   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    divss VREG_ADDRESS(%eax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: x86/op_rem_float.S */
-    /* rem_float vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx <- BB
-    movzbl  2(rPC), %eax                    # eax <- CC
-    flds    VREG_ADDRESS(%ecx)              # vBB to fp stack
-    flds    VREG_ADDRESS(%eax)              # vCC to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(rINST)             # %st to vAA
-    CLEAR_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: x86/op_add_double.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movsd   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    addsd VREG_ADDRESS(%eax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: x86/op_sub_double.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movsd   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    subsd VREG_ADDRESS(%eax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: x86/op_mul_double.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movsd   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    mulsd VREG_ADDRESS(%eax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: x86/op_div_double.S */
-/* File: x86/sseBinop.S */
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movsd   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    divsd VREG_ADDRESS(%eax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: x86/op_rem_double.S */
-    /* rem_double vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx <- BB
-    movzbl  2(rPC), %eax                    # eax <- CC
-    fldl    VREG_ADDRESS(%ecx)              # %st1 <- fp[vBB]
-    fldl    VREG_ADDRESS(%eax)              # %st0 <- fp[vCC]
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(rINST)             # fp[vAA] <- %st
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: x86/op_add_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    addl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: x86/op_sub_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    subl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: x86/op_mul_int_2addr.S */
-    /* mul vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    movl    rIBASE, rINST
-    imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
-    movl    rINST, rIBASE
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: x86/op_div_int_2addr.S */
-/* File: x86/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    mov     rIBASE, LOCAL0(%esp)
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    jne     .Lop_div_int_2addr_continue_div2addr
-    cmpl    $0x80000000, %eax
-    jne     .Lop_div_int_2addr_continue_div2addr
-    movl    $0x80000000, %eax
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_div_int_2addr_continue_div2addr:
-    cltd
-    idivl   %ecx
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: x86/op_rem_int_2addr.S */
-/* File: x86/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    mov     rIBASE, LOCAL0(%esp)
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    jne     .Lop_rem_int_2addr_continue_div2addr
-    cmpl    $0x80000000, %eax
-    jne     .Lop_rem_int_2addr_continue_div2addr
-    movl    $0, rIBASE
-    SET_VREG rIBASE, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_rem_int_2addr_continue_div2addr:
-    cltd
-    idivl   %ecx
-    SET_VREG rIBASE, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: x86/op_and_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    andl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: x86/op_or_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    orl     %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: x86/op_xor_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $0xf, %cl                      # ecx <- A
-    xorl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: x86/op_shl_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vAA
-    sall    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: x86/op_shr_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vAA
-    sarl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: x86/op_ushr_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vAA
-    shrl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: x86/op_add_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $0xF, rINSTbl                  # rINST<- A
-    addl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
-    adcl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: x86/op_sub_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $0xF, rINSTbl                  # rINST<- A
-    subl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
-    sbbl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: x86/op_mul_long_2addr.S */
-/*
- * Signed 64-bit integer multiply, 2-addr version
- *
- * We could definately use more free registers for
- * this code.  We must spill %edx (rIBASE) because it
- * is used by imul.  We'll also spill rINST (ebx),
- * giving us eax, ebc, ecx and rIBASE as computational
- * temps.  On top of that, we'll spill %esi (edi)
- * for use as the vA pointer and rFP (esi) for use
- * as the vB pointer.  Yuck.
- */
-    /* mul-long/2addr vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    CLEAR_WIDE_REF %eax                     # clear refs in advance
-    sarl    $4, rINST                      # rINST <- B
-    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
-    mov     rFP, LOCAL1(%esp)               # save FP
-    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
-    leal    (rFP,%eax,4), %esi              # esi <- &v[A]
-    leal    (rFP,rINST,4), rFP              # rFP <- &v[B]
-    movl    4(%esi), %ecx                   # ecx <- Amsw
-    imull   (rFP), %ecx                     # ecx <- (Amsw*Blsw)
-    movl    4(rFP), %eax                    # eax <- Bmsw
-    imull   (%esi), %eax                    # eax <- (Bmsw*Alsw)
-    addl    %eax, %ecx                      # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
-    movl    (rFP), %eax                     # eax <- Blsw
-    mull    (%esi)                          # eax <- (Blsw*Alsw)
-    leal    (%ecx,rIBASE), rIBASE           # full result now in %edx:%eax
-    movl    rIBASE, 4(%esi)                 # v[A+1] <- rIBASE
-    movl    %eax, (%esi)                    # v[A] <- %eax
-    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
-    mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    mov     LOCAL1(%esp), rFP               # restore FP
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: x86/op_div_long_2addr.S */
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div/2addr vA, vB */
-    .extern   art_quick_ldiv
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    movzbl  rINSTbl, %eax
-    shrl    $4, %eax                       # eax <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movl    %ebx, %ecx
-    GET_VREG %edx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %edx, %eax
-    orl     %ebx, %eax
-    jz      common_errDivideByZero
-    GET_VREG %eax, %ecx
-    GET_VREG_HIGH %ecx, %ecx
-    call    SYMBOL(art_quick_ldiv)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: x86/op_rem_long_2addr.S */
-/* File: x86/op_div_long_2addr.S */
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div/2addr vA, vB */
-    .extern   art_quick_lmod
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    movzbl  rINSTbl, %eax
-    shrl    $4, %eax                       # eax <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movl    %ebx, %ecx
-    GET_VREG %edx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %edx, %eax
-    orl     %ebx, %eax
-    jz      common_errDivideByZero
-    GET_VREG %eax, %ecx
-    GET_VREG_HIGH %ecx, %ecx
-    call    SYMBOL(art_quick_lmod)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: x86/op_and_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $0xF, rINSTbl                  # rINST<- A
-    andl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
-    andl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: x86/op_or_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $0xF, rINSTbl                  # rINST<- A
-    orl     %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
-    orl     %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: x86/op_xor_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $0xF, rINSTbl                  # rINST<- A
-    xorl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
-    xorl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: x86/op_shl_long_2addr.S */
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shldl   %eax, rIBASE
-    sall    %cl, %eax
-    testb   $32, %cl
-    je      2f
-    movl    %eax, rIBASE
-    xorl    %eax, %eax
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: x86/op_shr_long_2addr.S */
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shrdl   rIBASE, %eax
-    sarl    %cl, rIBASE
-    testb   $32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    sarl    $31, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86/op_ushr_long_2addr.S */
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shrdl   rIBASE, %eax
-    shrl    %cl, rIBASE
-    testb   $32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    xorl    rIBASE, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: x86/op_add_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    addss VREG_ADDRESS(rINST), %xmm0
-    movss %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: x86/op_sub_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    subss VREG_ADDRESS(rINST), %xmm0
-    movss %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: x86/op_mul_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    mulss VREG_ADDRESS(rINST), %xmm0
-    movss %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: x86/op_div_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    divss VREG_ADDRESS(rINST), %xmm0
-    movss %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: x86/op_rem_float_2addr.S */
-    /* rem_float/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    flds    VREG_ADDRESS(rINST)             # vB to fp stack
-    andb    $0xf, %cl                      # ecx <- A
-    flds    VREG_ADDRESS(%ecx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(%ecx)              # %st to vA
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: x86/op_add_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    addsd VREG_ADDRESS(rINST), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: x86/op_sub_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    subsd VREG_ADDRESS(rINST), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: x86/op_mul_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    mulsd VREG_ADDRESS(rINST), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: x86/op_div_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    divsd VREG_ADDRESS(rINST), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: x86/op_rem_double_2addr.S */
-    /* rem_double/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fldl    VREG_ADDRESS(rINST)             # vB to fp stack
-    andb    $0xf, %cl                      # ecx <- A
-    fldl    VREG_ADDRESS(%ecx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(%ecx)              # %st to vA
-    CLEAR_WIDE_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: x86/op_add_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    addl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: x86/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    subl    %eax, %ecx                                  # for example: addl %ecx, %eax
-    SET_VREG %ecx, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: x86/op_mul_int_lit16.S */
-    /* mul/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movl    rIBASE, %ecx
-    movswl  2(rPC), rIBASE                  # rIBASE <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    imull   rIBASE, %eax                    # trashes rIBASE/edx
-    movl    %ecx, rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: x86/op_div_int_lit16.S */
-/* File: x86/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    jne     .Lop_div_int_lit16_continue_div
-    cmpl    $0x80000000, %eax
-    jne     .Lop_div_int_lit16_continue_div
-    movl    $0x80000000, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit16_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: x86/op_rem_int_lit16.S */
-/* File: x86/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    jne     .Lop_rem_int_lit16_continue_div
-    cmpl    $0x80000000, %eax
-    jne     .Lop_rem_int_lit16_continue_div
-    movl    $0, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit16_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG rIBASE, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: x86/op_and_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    andl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: x86/op_or_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    orl     %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: x86/op_xor_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    xorl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: x86/op_add_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    addl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86/op_rsub_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    subl    %eax, %ecx                                  # ex: addl %ecx,%eax
-    SET_VREG %ecx, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: x86/op_mul_int_lit8.S */
-    /* mul/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movl    rIBASE, %ecx
-    GET_VREG  %eax, %eax                    # eax <- rBB
-    movsbl  3(rPC), rIBASE                  # rIBASE <- ssssssCC
-    imull   rIBASE, %eax                    # trashes rIBASE/edx
-    movl    %ecx, rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: x86/op_div_int_lit8.S */
-/* File: x86/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %eax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $0x80000000, %eax
-    jne     .Lop_div_int_lit8_continue_div
-    cmpl    $-1, %ecx
-    jne     .Lop_div_int_lit8_continue_div
-    movl    $0x80000000, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit8_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: x86/op_rem_int_lit8.S */
-/* File: x86/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %eax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $0x80000000, %eax
-    jne     .Lop_rem_int_lit8_continue_div
-    cmpl    $-1, %ecx
-    jne     .Lop_rem_int_lit8_continue_div
-    movl    $0, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit8_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG rIBASE, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: x86/op_and_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    andl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: x86/op_or_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    orl     %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: x86/op_xor_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    xorl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: x86/op_shl_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    sall    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: x86/op_shr_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    sarl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86/op_ushr_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    shrl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: x86/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movl (%ecx,%eax,1), %eax
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: x86/op_iget_wide_quick.S */
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movq    (%ecx,%eax,1), %xmm0
-    andb    $0xf, rINSTbl                  # rINST <- A
-    SET_WIDE_FP_VREG %xmm0, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: x86/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    %eax, OUT_ARG1(%esp)
-    EXPORT_PC
-    call    SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException                  # bail out
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: x86/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movl    rINST, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: x86/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    movzbl    rINSTbl, %ecx                 # ecx<- BA
-    sarl      $4, %ecx                     # ecx<- B
-    GET_VREG  %ecx, %ecx                    # vB (object we're operating on)
-    testl     %ecx, %ecx                    # is object null?
-    je        common_errNullObject
-    movzwl    2(rPC), %eax                  # eax<- field byte offset
-    leal      (%ecx,%eax,1), %ecx           # ecx<- Address of 64-bit target
-    andb      $0xf, rINSTbl                # rINST<- A
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0<- fp[A]/fp[A+1]
-    movq      %xmm0, (%ecx)                 # obj.field<- r0/r1
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: x86/op_iput_object_quick.S */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST 232
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpIputObjectQuick)
-    testb   %al, %al
-    jz      MterpException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86/op_invoke_virtual_quick.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 233
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeVirtualQuick)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86/op_invoke_virtual_range_quick.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 234
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeVirtualQuickRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: x86/op_iput_boolean_quick.S */
-/* File: x86/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movb    rINSTbl, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: x86/op_iput_byte_quick.S */
-/* File: x86/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movb    rINSTbl, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: x86/op_iput_char_quick.S */
-/* File: x86/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movw    rINSTw, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: x86/op_iput_short_quick.S */
-/* File: x86/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movw    rINSTw, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: x86/op_iget_boolean_quick.S */
-/* File: x86/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movsbl (%ecx,%eax,1), %eax
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: x86/op_iget_byte_quick.S */
-/* File: x86/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movsbl (%ecx,%eax,1), %eax
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: x86/op_iget_char_quick.S */
-/* File: x86/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movzwl (%ecx,%eax,1), %eax
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: x86/op_iget_short_quick.S */
-/* File: x86/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movswl (%ecx,%eax,1), %eax
-    andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: x86/op_unused_f3.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: x86/op_unused_f4.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: x86/op_unused_f5.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: x86/op_unused_f6.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: x86/op_unused_f7.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: x86/op_unused_f8.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: x86/op_unused_f9.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: x86/op_invoke_polymorphic.S */
-/* File: x86/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 250
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokePolymorphic)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86/op_invoke_polymorphic_range.S */
-/* File: x86/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 251
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokePolymorphicRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: x86/op_invoke_custom.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 252
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeCustom)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: x86/op_invoke_custom_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST 253
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInvokeCustomRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: x86/op_const_method_handle.S */
-/* File: x86/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstMethodHandle)                 # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: x86/op_const_method_type.S */
-/* File: x86/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstMethodType)                 # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-    .balign 128
-/* File: x86/instruction_end.S */
-
-    OBJECT_TYPE(artMterpAsmInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
-    .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: x86/instruction_start_sister.S */
-
-    OBJECT_TYPE(artMterpAsmSisterStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
-    .global SYMBOL(artMterpAsmSisterStart)
-    .text
-    .balign 4
-SYMBOL(artMterpAsmSisterStart):
-
-/* File: x86/instruction_end_sister.S */
-
-    OBJECT_TYPE(artMterpAsmSisterEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
-    .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
-
-/* File: x86/instruction_start_alt.S */
-
-    OBJECT_TYPE(artMterpAsmAltInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
-    .global SYMBOL(artMterpAsmAltInstructionStart)
-    .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(0*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(1*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(2*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(3*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(4*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(5*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(6*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(7*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(8*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(9*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(10*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(11*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(12*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(13*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(14*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(15*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(16*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(17*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(18*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(19*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(20*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(21*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(22*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(23*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(24*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(25*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(26*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(27*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(28*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(29*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(30*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(31*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(32*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(33*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(34*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(35*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(36*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(37*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(38*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(39*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(40*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(41*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(42*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(43*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(44*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(45*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(46*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(47*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(48*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(49*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(50*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(51*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(52*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(53*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(54*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(55*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(56*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(57*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(58*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(59*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(60*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(61*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(62*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(63*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(64*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(65*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(66*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(67*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(68*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(69*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(70*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(71*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(72*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(73*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(74*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(75*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(76*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(77*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(78*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(79*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(80*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(81*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(82*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(83*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(84*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(85*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(86*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(87*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(88*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(89*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(90*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(91*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(92*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(93*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(94*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(95*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(96*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(97*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(98*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(99*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(100*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(101*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(102*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(103*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(104*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(105*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(106*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(107*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(108*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(109*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(110*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(111*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(112*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(113*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(114*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(115*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(116*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(117*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(118*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(119*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(120*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(121*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(122*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(123*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(124*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(125*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(126*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(127*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(128*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(129*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(130*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(131*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(132*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(133*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(134*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(135*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(136*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(137*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(138*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(139*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(140*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(141*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(142*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(143*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(144*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(145*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(146*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(147*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(148*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(149*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(150*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(151*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(152*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(153*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(154*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(155*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(156*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(157*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(158*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(159*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(160*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(161*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(162*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(163*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(164*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(165*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(166*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(167*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(168*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(169*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(170*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(171*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(172*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(173*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(174*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(175*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(176*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(177*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(178*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(179*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(180*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(181*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(182*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(183*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(184*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(185*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(186*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(187*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(188*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(189*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(190*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(191*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(192*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(193*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(194*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(195*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(196*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(197*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(198*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(199*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(200*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(201*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(202*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(203*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(204*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(205*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(206*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(207*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(208*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(209*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(210*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(211*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(212*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(213*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(214*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(215*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(216*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(217*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(218*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(219*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(220*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(221*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(222*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(223*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(224*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(225*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(226*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(227*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(228*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(229*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(230*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(231*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(232*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(233*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(234*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(235*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(236*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(237*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(238*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(239*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(240*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(241*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(242*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(243*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(244*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(245*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(246*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(247*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(248*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(249*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(250*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(251*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(252*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(253*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(254*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(255*128)
-
-    .balign 128
-/* File: x86/instruction_end_alt.S */
-
-    OBJECT_TYPE(artMterpAsmAltInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
-    .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
-/* File: x86/footer.S */
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogDivideByZeroException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogArrayIndexException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNegativeArraySizeException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNoSuchMethodException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNullObjectException)
-#endif
-    jmp     MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    call    SYMBOL(MterpLogExceptionThrownException)
-#endif
-    jmp     MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    THREAD_FLAGS_OFFSET(%eax), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpLogSuspendFallback)
-#endif
-    jmp     MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    movl    rSELF, %eax
-    testl   $-1, THREAD_EXCEPTION_OFFSET(%eax)
-    jz      MterpFallback
-    /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpHandleException)
-    testb   %al, %al
-    jz      MterpExceptionReturn
-    movl    OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
-    movl    OFF_FP_DEX_PC(rFP), %ecx
-    lea     (%eax, %ecx, 2), rPC
-    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
-    /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    /* resume execution at catch block */
-    REFRESH_IBASE
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
-    jg      .L_forward_branch               # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmpw    $JIT_CHECK_OSR, rPROFILE
-    je      .L_osr_check
-    decw    rPROFILE
-    je      .L_add_batch                    # counted down to zero - report
-.L_resume_backward_branch:
-    movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    leal    (rPC, rINST, 2), rPC
-    FETCH_INST
-    jnz     .L_suspend_request_pending
-    REFRESH_IBASE
-    GOTO_NEXT
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    movl    %eax, OUT_ARG0(%esp)            # rSELF in eax
-    call    SYMBOL(MterpSuspendCheck)       # (self)
-    testb   %al, %al
-    jnz     MterpFallback
-    REFRESH_IBASE                           # might have changed during suspend
-    GOTO_NEXT
-
-.L_no_count_backwards:
-    cmpw    $JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    jne     .L_resume_backward_branch
-.L_osr_check:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_backward_branch
-    jmp     MterpOnStackReplacement
-
-.L_forward_branch:
-    cmpw    $JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    je      .L_check_osr_forward
-.L_resume_forward_branch:
-    leal    (rPC, rINST, 2), rPC
-    FETCH_INST
-    GOTO_NEXT
-
-.L_check_osr_forward:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    REFRESH_IBASE
-    jz      .L_resume_forward_branch
-    jmp     MterpOnStackReplacement
-
-.L_add_batch:
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    jmp     .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    $2, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    REFRESH_IBASE
-    jnz     MterpOnStackReplacement
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpLogOSR)
-#endif
-    movl    $1, %eax
-    jmp     MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    xor     %eax, %eax
-    jmp     MterpDone
-
-/*
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    movl    $1, %eax
-    jmp     MterpDone
-MterpReturn:
-    movl    OFF_FP_RESULT_REGISTER(rFP), %edx
-    movl    %eax, (%edx)
-    movl    %ecx, 4(%edx)
-    mov     $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmpw    $0, rPROFILE
-    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
-
-    movl    %eax, rINST                     # stash return value
-    /* Report cached hotness counts */
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movl    rINST, %eax                     # restore return value
-
-    /* pop up frame */
-MRestoreFrame:
-    addl    $FRAME_SIZE, %esp
-    .cfi_adjust_cfa_offset -FRAME_SIZE
-
-    /* Restore callee save register */
-    POP     %ebx
-    POP     %esi
-    POP     %edi
-    POP     %ebp
-    ret
-    .cfi_endproc
-    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
deleted file mode 100644
index 6d8bb4c..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ /dev/null
@@ -1,12040 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'x86_64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: x86_64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
-   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
-   rbx, rbp, r12-r15
-Return regs:
-   32-bit in eax
-   64-bit in rax
-   fp on xmm0
-
-First 8 fp parameters came in xmm0-xmm7.
-First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters passed on stack, pushed right-to-left.  On entry to target, first
-param is at 8(%esp).  Traditional entry code is:
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
-  nick     reg   purpose
-  rPROFILE rbp   countdown register for jit profiling
-  rPC      r12   interpreted program counter, used for fetching instructions
-  rFP      r13   interpreted frame pointer, used for accessing locals and args
-  rINSTw   bx    first 16-bit code of current instruction
-  rINSTbl  bl    opcode portion of instruction word
-  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
-  rIBASE   r14   base of instruction handler table
-  rREFS    r15   base of object references in shadow frame.
-
-Notes:
-   o High order 16 bits of ebx must be zero on entry to handler
-   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
-    #define MACRO_LITERAL(value) $(value)
-    #define FUNCTION_TYPE(name)
-    #define OBJECT_TYPE(name)
-    #define SIZE(start,end)
-    // Mac OS' symbols have an _ prefix.
-    #define SYMBOL(name) _ ## name
-    #define ASM_HIDDEN .private_extern
-#else
-    #define MACRO_LITERAL(value) $value
-    #define FUNCTION_TYPE(name) .type name, @function
-    #define OBJECT_TYPE(name) .type name, @object
-    #define SIZE(start,end) .size start, .-end
-    #define SYMBOL(name) name
-    #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
-    pushq \_reg
-    .cfi_adjust_cfa_offset 8
-    .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
-    popq \_reg
-    .cfi_adjust_cfa_offset -8
-    .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 8 bytes for return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE     8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        %rcx
-#define IN_ARG2        %rdx
-#define IN_ARG1        %rsi
-#define IN_ARG0        %rdi
-/* Spill offsets relative to %esp */
-#define SELF_SPILL     (FRAME_SIZE -  8)
-/* Out Args  */
-#define OUT_ARG3       %rcx
-#define OUT_ARG2       %rdx
-#define OUT_ARG1       %rsi
-#define OUT_ARG0       %rdi
-#define OUT_32_ARG3    %ecx
-#define OUT_32_ARG2    %edx
-#define OUT_32_ARG1    %esi
-#define OUT_32_ARG0    %edi
-#define OUT_FP_ARG1    %xmm1
-#define OUT_FP_ARG0    %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF    SELF_SPILL(%rsp)
-#define rPC      %r12
-#define CFI_DEX  12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP  5  // DWARF register number of the first argument register (rdi).
-#define rFP      %r13
-#define rINST    %ebx
-#define rINSTq   %rbx
-#define rINSTw   %bx
-#define rINSTbh  %bh
-#define rINSTbl  %bl
-#define rIBASE   %r14
-#define rREFS    %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
-    movq    THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
-    movq    rSELF, rIBASE
-    REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
-    movb    rINSTbl, rINSTbh
-    movb    $\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
- */
-.macro FETCH_INST
-    movzwq  (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
-    movzx   rINSTbl,%eax
-    movzbl  rINSTbh,rINST
-    shll    MACRO_LITERAL(7), %eax
-    addq    rIBASE, %rax
-    jmp     *%rax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
-    leaq    2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
-    ADVANCE_PC \_count
-    FETCH_INST
-    GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
-    movl    (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
-    movq    (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
-    movq    \_reg, (rFP,\_vreg,4)
-    xorq    \_reg, \_reg
-    movq    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
-    movl    4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
-    movl    \_reg, 4(rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-/* File: x86_64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
-    .global SYMBOL(ExecuteMterpImpl)
-    FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- *  0  Thread* self
- *  1  insns_
- *  2  ShadowFrame
- *  3  JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
-    .cfi_startproc
-    .cfi_def_cfa rsp, 8
-
-    /* Spill callee save regs */
-    PUSH %rbx
-    PUSH %rbp
-    PUSH %r12
-    PUSH %r13
-    PUSH %r14
-    PUSH %r15
-
-    /* Allocate frame */
-    subq    $FRAME_SIZE, %rsp
-    .cfi_adjust_cfa_offset FRAME_SIZE
-
-    /* Remember the return register */
-    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
-    /* Remember the code_item */
-    movq    IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
-    /* set up "named" registers */
-    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
-    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
-    leaq    (rFP, %rax, 4), rREFS
-    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
-    leaq    (IN_ARG1, %rax, 2), rPC
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    movq    IN_ARG0, rSELF
-    REFRESH_IBASE_REG IN_ARG0
-
-    /* Set up for backwards branches & osr profiling */
-    movq    IN_ARG0, OUT_ARG2  /* Set up OUT_ARG2 before clobbering IN_ARG0 */
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpSetUpHotnessCountdown)
-    movswl  %ax, rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/* File: x86_64/instruction_start.S */
-
-    OBJECT_TYPE(artMterpAsmInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
-    .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
-    .text
-
-/* ------------------------------ */
-    .balign 128
-.L_op_nop: /* 0x00 */
-/* File: x86_64/op_nop.S */
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move: /* 0x01 */
-/* File: x86_64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movl    rINST, %eax                     # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG %edx, rINSTq
-    .if 0
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: x86_64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzwq  2(rPC), %rax                    # eax <- BBBB
-    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
-    .if 0
-    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: x86_64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwq  4(rPC), %rcx                    # ecx <- BBBB
-    movzwq  2(rPC), %rax                    # eax <- AAAA
-    GET_VREG %edx, %rcx
-    .if 0
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: x86_64/op_move_wide.S */
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
-    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: x86_64/op_move_wide_from16.S */
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  2(rPC), %ecx                    # ecx <- BBBB
-    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
-    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: x86_64/op_move_wide_16.S */
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwq  4(rPC), %rcx                    # ecx<- BBBB
-    movzwq  2(rPC), %rax                    # eax<- AAAA
-    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
-    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: x86_64/op_move_object.S */
-/* File: x86_64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movl    rINST, %eax                     # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG %edx, rINSTq
-    .if 1
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: x86_64/op_move_object_from16.S */
-/* File: x86_64/op_move_from16.S */
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzwq  2(rPC), %rax                    # eax <- BBBB
-    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
-    .if 1
-    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: x86_64/op_move_object_16.S */
-/* File: x86_64/op_move_16.S */
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwq  4(rPC), %rcx                    # ecx <- BBBB
-    movzwq  2(rPC), %rax                    # eax <- AAAA
-    GET_VREG %edx, %rcx
-    .if 1
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: x86_64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
-    movl    (%rax), %eax                    # r0 <- result.i.
-    .if 0
-    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: x86_64/op_move_result_wide.S */
-    /* move-result-wide vAA */
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
-    movq    (%rax), %rdx                         # Get wide
-    SET_WIDE_VREG %rdx, rINSTq                   # v[AA] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: x86_64/op_move_result_object.S */
-/* File: x86_64/op_move_result.S */
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
-    movl    (%rax), %eax                    # r0 <- result.i.
-    .if 1
-    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: x86_64/op_move_exception.S */
-    /* move-exception vAA */
-    movq    rSELF, %rcx
-    movl    THREAD_EXCEPTION_OFFSET(%rcx), %eax
-    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
-    movl    $0, THREAD_EXCEPTION_OFFSET(%rcx)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: x86_64/op_return_void.S */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorq    %rax, %rax
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return: /* 0x0f */
-/* File: x86_64/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: x86_64/op_return_wide.S */
-/*
- * Return a 64-bit value.
- */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_WIDE_VREG %rax, rINSTq              # eax <- v[AA]
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: x86_64/op_return_object.S */
-/* File: x86_64/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    jmp     MterpReturn
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: x86_64/op_const_4.S */
-    /* const/4 vA, #+B */
-    movsbl  rINSTbl, %eax                   # eax <-ssssssBx
-    movl    $0xf, rINST
-    andl    %eax, rINST                     # rINST <- A
-    sarl    $4, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: x86_64/op_const_16.S */
-    /* const/16 vAA, #+BBBB */
-    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const: /* 0x14 */
-/* File: x86_64/op_const.S */
-    /* const vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax, rINSTq                   # vAA<- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: x86_64/op_const_high16.S */
-    /* const/high16 vAA, #+BBBB0000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax, rINSTq                   # vAA <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: x86_64/op_const_wide_16.S */
-    /* const-wide/16 vAA, #+BBBB */
-    movswq  2(rPC), %rax                    # rax <- ssssBBBB
-    SET_WIDE_VREG %rax, rINSTq              # store
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: x86_64/op_const_wide_32.S */
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    movslq   2(rPC), %rax                   # eax <- ssssssssBBBBbbbb
-    SET_WIDE_VREG %rax, rINSTq              # store
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: x86_64/op_const_wide.S */
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
-    SET_WIDE_VREG %rax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: x86_64/op_const_wide_high16.S */
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    movzwq  2(rPC), %rax                    # eax <- 0000BBBB
-    salq    $48, %rax                      # eax <- BBBB0000
-    SET_WIDE_VREG %rax, rINSTq              # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: x86_64/op_const_string.S */
-/* File: x86_64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstString)                 # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: x86_64/op_const_string_jumbo.S */
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- BBBB
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: x86_64/op_const_class.S */
-/* File: x86_64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstClass
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstClass)                 # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: x86_64/op_monitor_enter.S */
-/*
- * Synchronize on an object.
- */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    GET_VREG OUT_32_ARG0, rINSTq
-    movq    rSELF, OUT_ARG1
-    call    SYMBOL(artLockObjectFromCode)   # (object, self)
-    testq   %rax, %rax
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: x86_64/op_monitor_exit.S */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction.  See the Dalvik
- * instruction spec.
- */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    GET_VREG OUT_32_ARG0, rINSTq
-    movq    rSELF, OUT_ARG1
-    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
-    testq   %rax, %rax
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: x86_64/op_check_cast.S */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
-    leaq    VREG_ADDRESS(rINSTq), OUT_ARG1
-    movq    OFF_FP_METHOD(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: x86_64/op_instance_of.S */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- CCCC
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $4, %eax                       # eax <- B
-    leaq    VREG_ADDRESS(%rax), OUT_ARG1    # Get object address
-    movq    OFF_FP_METHOD(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
-    movsbl  %al, %eax
-    movq    rSELF, %rcx
-    cmpq    $0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException
-    andb    $0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: x86_64/op_array_length.S */
-/*
- * Return the length of an array.
- */
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $4, rINST                      # rINST <- B
-    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
-    testl   %ecx, %ecx                      # is null?
-    je      common_errNullObject
-    andb    $0xf, %al                      # eax <- A
-    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
-    SET_VREG rINST, %rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: x86_64/op_new_instance.S */
-/*
- * Create a new instance of a class.
- */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rSELF, OUT_ARG1
-    REFRESH_INST 34
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpNewInstance)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: x86_64/op_new_array.S */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST 35
-    movq    rINSTq, OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpNewArray)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: x86_64/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArray
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpFilledNewArray)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: x86_64/op_filled_new_array_range.S */
-/* File: x86_64/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern MterpFilledNewArrayRange
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpFilledNewArrayRange)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: x86_64/op_fill_array_data.S */
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    movslq  2(rPC), %rcx                    # rcx <- ssssssssBBBBbbbb
-    leaq    (rPC,%rcx,2), OUT_ARG1          # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
-    GET_VREG OUT_32_ARG0, rINSTq            # OUT_ARG0 <- vAA (array object)
-    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
-    .balign 128
-.L_op_throw: /* 0x27 */
-/* File: x86_64/op_throw.S */
-/*
- * Throw an exception object in the current thread.
- */
-    /* throw vAA */
-    EXPORT_PC
-    GET_VREG %eax, rINSTq                   # eax<- vAA (exception object)
-    testb   %al, %al
-    jz      common_errNullObject
-    movq    rSELF, %rcx
-    movq    %rax, THREAD_EXCEPTION_OFFSET(%rcx)
-    jmp     MterpException
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto: /* 0x28 */
-/* File: x86_64/op_goto.S */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto +AA */
-    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: x86_64/op_goto_16.S */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto/16 +AAAA */
-    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: x86_64/op_goto_32.S */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- *  Because we need the SF bit set, we'll use an adds
- * to convert from Dalvik offset to byte offset.
- */
-    /* goto/32 +AAAAAAAA */
-    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: x86_64/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movslq  2(rPC), OUT_ARG0                # rcx <- ssssssssBBBBbbbb
-    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + ssssssssBBBBbbbb*2
-    GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
-    call    SYMBOL(MterpDoPackedSwitch)
-    testl   %eax, %eax
-    movslq  %eax, rINSTq
-    jmp     MterpCommonTakenBranch
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: x86_64/op_sparse_switch.S */
-/* File: x86_64/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movslq  2(rPC), OUT_ARG0                # rcx <- ssssssssBBBBbbbb
-    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + ssssssssBBBBbbbb*2
-    GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
-    call    SYMBOL(MterpDoSparseSwitch)
-    testl   %eax, %eax
-    movslq  %eax, rINSTq
-    jmp     MterpCommonTakenBranch
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: x86_64/op_cmpl_float.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx<- CC
-    movzbq  2(rPC), %rax                    # eax<- BB
-    movss VREG_ADDRESS(%rax), %xmm0
-    xor     %eax, %eax
-    ucomiss VREG_ADDRESS(%rcx), %xmm0
-    jp      .Lop_cmpl_float_nan_is_neg
-    je      .Lop_cmpl_float_finish
-    jb      .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
-    addb    $1, %al
-    jmp     .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
-    movl    $-1, %eax
-.Lop_cmpl_float_finish:
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: x86_64/op_cmpg_float.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx<- CC
-    movzbq  2(rPC), %rax                    # eax<- BB
-    movss VREG_ADDRESS(%rax), %xmm0
-    xor     %eax, %eax
-    ucomiss VREG_ADDRESS(%rcx), %xmm0
-    jp      .Lop_cmpg_float_nan_is_pos
-    je      .Lop_cmpg_float_finish
-    jb      .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
-    addb    $1, %al
-    jmp     .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
-    movl    $-1, %eax
-.Lop_cmpg_float_finish:
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: x86_64/op_cmpl_double.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx<- CC
-    movzbq  2(rPC), %rax                    # eax<- BB
-    movsd VREG_ADDRESS(%rax), %xmm0
-    xor     %eax, %eax
-    ucomisd VREG_ADDRESS(%rcx), %xmm0
-    jp      .Lop_cmpl_double_nan_is_neg
-    je      .Lop_cmpl_double_finish
-    jb      .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
-    addb    $1, %al
-    jmp     .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
-    movl    $-1, %eax
-.Lop_cmpl_double_finish:
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: x86_64/op_cmpg_double.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx<- CC
-    movzbq  2(rPC), %rax                    # eax<- BB
-    movsd VREG_ADDRESS(%rax), %xmm0
-    xor     %eax, %eax
-    ucomisd VREG_ADDRESS(%rcx), %xmm0
-    jp      .Lop_cmpg_double_nan_is_pos
-    je      .Lop_cmpg_double_finish
-    jb      .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
-    addb    $1, %al
-    jmp     .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
-    movl    $-1, %eax
-.Lop_cmpg_double_finish:
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: x86_64/op_cmp_long.S */
-/*
- * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
-    /* cmp-long vAA, vBB, vCC */
-    movzbq  2(rPC), %rdx                    # edx <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
-    xorl    %eax, %eax
-    xorl    %edi, %edi
-    addb    $1, %al
-    movl    $-1, %esi
-    cmpq    VREG_ADDRESS(%rcx), %rdx
-    cmovl   %esi, %edi
-    cmovg   %eax, %edi
-    SET_VREG %edi, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: x86_64/op_if_eq.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    jne   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: x86_64/op_if_ne.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    je   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: x86_64/op_if_lt.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    jge   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: x86_64/op_if_ge.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    jl   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: x86_64/op_if_gt.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    jle   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: x86_64/op_if_le.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    jg   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: x86_64/op_if_eqz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    jne   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: x86_64/op_if_nez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    je   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: x86_64/op_if_ltz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    jge   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: x86_64/op_if_gez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    jl   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: x86_64/op_if_gtz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    jle   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: x86_64/op_if_lez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    jg   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: x86_64/op_unused_3e.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: x86_64/op_unused_3f.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: x86_64/op_unused_40.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: x86_64/op_unused_41.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: x86_64/op_unused_42.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: x86_64/op_unused_43.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget: /* 0x44 */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    movq    MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movl   MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: x86_64/op_aget_wide.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 1
-    movq    MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movq   MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: x86_64/op_aget_object.S */
-/*
- * Array object get.  vAA <- vBB[vCC].
- *
- * for: aget-object
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG OUT_32_ARG0, %rax              # eax <- vBB (array object)
-    GET_VREG OUT_32_ARG1, %rcx              # ecx <- vCC (requested index)
-    EXPORT_PC
-    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
-    movq    rSELF, %rcx
-    cmpq    $0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException
-    SET_VREG_OBJECT %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: x86_64/op_aget_boolean.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    movq    MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movzbl   MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: x86_64/op_aget_byte.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    movq    MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movsbl   MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: x86_64/op_aget_char.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    movq    MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movzwl   MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: x86_64/op_aget_short.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    movq    MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    movswl   MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput: /* 0x4b */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movl    rINST, MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: x86_64/op_aput_wide.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 1
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movq    rINSTq, MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: x86_64/op_aput_object.S */
-/*
- * Store an object into an array.  vBB[vCC] <- vAA.
- */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST 77
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpAputObject)         # (array, index)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: x86_64/op_aput_boolean.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movb    rINSTbl, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: x86_64/op_aput_byte.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movb    rINSTbl, MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: x86_64/op_aput_char.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movw    rINSTw, MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: x86_64/op_aput_short.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if 0
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    movw    rINSTw, MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget: /* 0x52 */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU32
-    REFRESH_INST 82                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: x86_64/op_iget_wide.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU64
-    REFRESH_INST 83                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: x86_64/op_iget_object.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetObj
-    REFRESH_INST 84                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: x86_64/op_iget_boolean.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU8
-    REFRESH_INST 85                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: x86_64/op_iget_byte.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI8
-    REFRESH_INST 86                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: x86_64/op_iget_char.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetU16
-    REFRESH_INST 87                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: x86_64/op_iget_short.S */
-/* File: x86_64/op_iget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIGetI16
-    REFRESH_INST 88                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIGetI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput: /* 0x59 */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU32
-    REFRESH_INST 89                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: x86_64/op_iput_wide.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU64
-    REFRESH_INST 90                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: x86_64/op_iput_object.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutObj
-    REFRESH_INST 91                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: x86_64/op_iput_boolean.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU8
-    REFRESH_INST 92                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: x86_64/op_iput_byte.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI8
-    REFRESH_INST 93                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: x86_64/op_iput_char.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutU16
-    REFRESH_INST 94                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: x86_64/op_iput_short.S */
-/* File: x86_64/op_iput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpIPutI16
-    REFRESH_INST 95                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpIPutI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget: /* 0x60 */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU32
-    REFRESH_INST 96                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: x86_64/op_sget_wide.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU64
-    REFRESH_INST 97                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: x86_64/op_sget_object.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetObj
-    REFRESH_INST 98                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: x86_64/op_sget_boolean.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU8
-    REFRESH_INST 99                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: x86_64/op_sget_byte.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI8
-    REFRESH_INST 100                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: x86_64/op_sget_char.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetU16
-    REFRESH_INST 101                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: x86_64/op_sget_short.S */
-/* File: x86_64/op_sget.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSGetI16
-    REFRESH_INST 102                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSGetI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput: /* 0x67 */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU32
-    REFRESH_INST 103                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutU32)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: x86_64/op_sput_wide.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU64
-    REFRESH_INST 104                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutU64)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: x86_64/op_sput_object.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutObj
-    REFRESH_INST 105                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutObj)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: x86_64/op_sput_boolean.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU8
-    REFRESH_INST 106                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutU8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: x86_64/op_sput_byte.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI8
-    REFRESH_INST 107                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutI8)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: x86_64/op_sput_char.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutU16
-    REFRESH_INST 108                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutU16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: x86_64/op_sput_short.S */
-/* File: x86_64/op_sput.S */
-/* File: x86_64/field.S */
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern MterpSPutI16
-    REFRESH_INST 109                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL(MterpSPutI16)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: x86_64/op_invoke_virtual.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtual
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 110
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeVirtual)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: x86_64/op_invoke_super.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuper
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 111
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeSuper)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: x86_64/op_invoke_direct.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirect
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 112
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeDirect)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: x86_64/op_invoke_static.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStatic
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 113
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeStatic)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: x86_64/op_invoke_interface.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterface
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 114
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeInterface)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
-    .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: x86_64/op_return_void_no_barrier.S */
-    movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorq    %rax, %rax
-    jmp     MterpReturn
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: x86_64/op_invoke_virtual_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 116
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeVirtualRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: x86_64/op_invoke_super_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeSuperRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 117
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeSuperRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: x86_64/op_invoke_direct_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeDirectRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 118
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeDirectRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: x86_64/op_invoke_static_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeStaticRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 119
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeStaticRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: x86_64/op_invoke_interface_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeInterfaceRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 120
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeInterfaceRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: x86_64/op_unused_79.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: x86_64/op_unused_7a.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: x86_64/op_neg_int.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-    negl    %eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: x86_64/op_not_int.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-    notl    %eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: x86_64/op_neg_long.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-    negq    %rax
-    .if 1
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: x86_64/op_not_long.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-    notq    %rax
-    .if 1
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: x86_64/op_neg_float.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-    xorl    $0x80000000, %eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: x86_64/op_neg_double.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-    movq    $0x8000000000000000, %rsi
-    xorq    %rsi, %rax
-    .if 1
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: x86_64/op_int_to_long.S */
-    /* int to long vA, vB */
-    movzbq  rINSTbl, %rax                   # rax <- +A
-    sarl    $4, %eax                       # eax <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movslq  VREG_ADDRESS(%rax), %rax
-    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: x86_64/op_int_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtsi2ssl    VREG_ADDRESS(rINSTq), %xmm0
-    .if 0
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: x86_64/op_int_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtsi2sdl    VREG_ADDRESS(rINSTq), %xmm0
-    .if 1
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: x86_64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: x86_64/op_move.S */
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movl    rINST, %eax                     # eax <- BA
-    andb    $0xf, %al                      # eax <- A
-    shrl    $4, rINST                      # rINST <- B
-    GET_VREG %edx, rINSTq
-    .if 0
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: x86_64/op_long_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtsi2ssq    VREG_ADDRESS(rINSTq), %xmm0
-    .if 0
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: x86_64/op_long_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtsi2sdq    VREG_ADDRESS(rINSTq), %xmm0
-    .if 1
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: x86_64/op_float_to_int.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.
- */
-    /* float/double to int/long vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    movss   VREG_ADDRESS(rINSTq), %xmm0
-    movl  $0x7fffffff, %eax
-    cvtsi2ssl %eax, %xmm1
-    comiss    %xmm1, %xmm0
-    jae     1f
-    jp      2f
-    cvttss2sil  %xmm0, %eax
-    jmp     1f
-2:
-    xorl    %eax, %eax
-1:
-    .if 0
-    SET_WIDE_VREG %eax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: x86_64/op_float_to_long.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.
- */
-    /* float/double to int/long vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    movss   VREG_ADDRESS(rINSTq), %xmm0
-    movq  $0x7fffffffffffffff, %rax
-    cvtsi2ssq %rax, %xmm1
-    comiss    %xmm1, %xmm0
-    jae     1f
-    jp      2f
-    cvttss2siq  %xmm0, %rax
-    jmp     1f
-2:
-    xorq    %rax, %rax
-1:
-    .if 1
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %rax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: x86_64/op_float_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtss2sd    VREG_ADDRESS(rINSTq), %xmm0
-    .if 1
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: x86_64/op_double_to_int.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.
- */
-    /* float/double to int/long vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    movsd   VREG_ADDRESS(rINSTq), %xmm0
-    movl  $0x7fffffff, %eax
-    cvtsi2sdl %eax, %xmm1
-    comisd    %xmm1, %xmm0
-    jae     1f
-    jp      2f
-    cvttsd2sil  %xmm0, %eax
-    jmp     1f
-2:
-    xorl    %eax, %eax
-1:
-    .if 0
-    SET_WIDE_VREG %eax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: x86_64/op_double_to_long.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.
- */
-    /* float/double to int/long vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    movsd   VREG_ADDRESS(rINSTq), %xmm0
-    movq  $0x7fffffffffffffff, %rax
-    cvtsi2sdq %rax, %xmm1
-    comisd    %xmm1, %xmm0
-    jae     1f
-    jp      2f
-    cvttsd2siq  %xmm0, %rax
-    jmp     1f
-2:
-    xorq    %rax, %rax
-1:
-    .if 1
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %rax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: x86_64/op_double_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    cvtsd2ss    VREG_ADDRESS(rINSTq), %xmm0
-    .if 0
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: x86_64/op_int_to_byte.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-movsbl  %al, %eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: x86_64/op_int_to_char.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-movzwl  %ax,%eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: x86_64/op_int_to_short.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4,rINST                       # rINST <- B
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $0xf,%cl                       # ecx <- A
-
-movswl %ax, %eax
-    .if 0
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: x86_64/op_add_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    addl    (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: x86_64/op_sub_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    subl    (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: x86_64/op_mul_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    imull   (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: x86_64/op_div_int.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    .if 0
-    GET_WIDE_VREG %rax, %rax                # eax <- vBB
-    GET_WIDE_VREG %ecx, %rcx             # ecx <- vCC
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    GET_VREG %ecx, %rcx                  # ecx <- vCC
-    .endif
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl  $-1, %ecx
-    je      2f
-    cdq                                    # rdx:rax <- sign-extended of rax
-    idivl   %ecx
-1:
-    .if 0
-    SET_WIDE_VREG %eax, rINSTq           # eax <- vBB
-    .else
-    SET_VREG %eax, rINSTq                # eax <- vBB
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 0
-    xorl %eax, %eax
-    .else
-    negl %eax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: x86_64/op_rem_int.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    .if 0
-    GET_WIDE_VREG %rax, %rax                # eax <- vBB
-    GET_WIDE_VREG %ecx, %rcx             # ecx <- vCC
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    GET_VREG %ecx, %rcx                  # ecx <- vCC
-    .endif
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl  $-1, %ecx
-    je      2f
-    cdq                                    # rdx:rax <- sign-extended of rax
-    idivl   %ecx
-1:
-    .if 0
-    SET_WIDE_VREG %edx, rINSTq           # eax <- vBB
-    .else
-    SET_VREG %edx, rINSTq                # eax <- vBB
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 1
-    xorl %edx, %edx
-    .else
-    negl %edx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: x86_64/op_and_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    andl    (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: x86_64/op_or_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    orl     (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: x86_64/op_xor_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    xorl    (rFP,%rcx,4), %eax                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: x86_64/op_shl_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 0
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    sall    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    sall    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: x86_64/op_shr_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 0
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    sarl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    sarl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: x86_64/op_ushr_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 0
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    shrl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    shrl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: x86_64/op_add_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    addq    (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: x86_64/op_sub_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    subq    (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: x86_64/op_mul_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    imulq   (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: x86_64/op_div_long.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    .if 1
-    GET_WIDE_VREG %rax, %rax                # eax <- vBB
-    GET_WIDE_VREG %rcx, %rcx             # ecx <- vCC
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    GET_VREG %rcx, %rcx                  # ecx <- vCC
-    .endif
-    testq   %rcx, %rcx
-    jz      common_errDivideByZero
-    cmpq  $-1, %rcx
-    je      2f
-    cqo                                    # rdx:rax <- sign-extended of rax
-    idivq   %rcx
-1:
-    .if 1
-    SET_WIDE_VREG %rax, rINSTq           # eax <- vBB
-    .else
-    SET_VREG %rax, rINSTq                # eax <- vBB
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 0
-    xorq %rax, %rax
-    .else
-    negq %rax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: x86_64/op_rem_long.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    .if 1
-    GET_WIDE_VREG %rax, %rax                # eax <- vBB
-    GET_WIDE_VREG %rcx, %rcx             # ecx <- vCC
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    GET_VREG %rcx, %rcx                  # ecx <- vCC
-    .endif
-    testq   %rcx, %rcx
-    jz      common_errDivideByZero
-    cmpq  $-1, %rcx
-    je      2f
-    cqo                                    # rdx:rax <- sign-extended of rax
-    idivq   %rcx
-1:
-    .if 1
-    SET_WIDE_VREG %rdx, rINSTq           # eax <- vBB
-    .else
-    SET_VREG %rdx, rINSTq                # eax <- vBB
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 1
-    xorq %rdx, %rdx
-    .else
-    negq %rdx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: x86_64/op_and_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    andq    (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: x86_64/op_or_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    orq     (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: x86_64/op_xor_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    xorq    (rFP,%rcx,4), %rax                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: x86_64/op_shl_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 1
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    salq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    salq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: x86_64/op_shr_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 1
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    sarq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    sarq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: x86_64/op_ushr_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if 1
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    shrq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    shrq    %cl, %rax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: x86_64/op_add_float.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    addss VREG_ADDRESS(%rax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: x86_64/op_sub_float.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    subss VREG_ADDRESS(%rax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: x86_64/op_mul_float.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    mulss VREG_ADDRESS(%rax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: x86_64/op_div_float.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    divss VREG_ADDRESS(%rax), %xmm0
-    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: x86_64/op_rem_float.S */
-    /* rem_float vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx <- BB
-    movzbq  2(rPC), %rax                    # eax <- CC
-    flds    VREG_ADDRESS(%rcx)              # vBB to fp stack
-    flds    VREG_ADDRESS(%rax)              # vCC to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(rINSTq)            # %st to vAA
-    CLEAR_REF rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double: /* 0xab */
-/* File: x86_64/op_add_double.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    addsd VREG_ADDRESS(%rax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: x86_64/op_sub_double.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    subsd VREG_ADDRESS(%rax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: x86_64/op_mul_double.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    mulsd VREG_ADDRESS(%rax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double: /* 0xae */
-/* File: x86_64/op_div_double.S */
-/* File: x86_64/sseBinop.S */
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    divsd VREG_ADDRESS(%rax), %xmm0
-    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: x86_64/op_rem_double.S */
-    /* rem_double vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx <- BB
-    movzbq  2(rPC), %rax                    # eax <- CC
-    fldl    VREG_ADDRESS(%rcx)              # %st1 <- fp[vBB]
-    fldl    VREG_ADDRESS(%rax)              # %st0 <- fp[vCC]
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(rINSTq)            # fp[vAA] <- %st
-    CLEAR_WIDE_REF rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: x86_64/op_add_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    addl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: x86_64/op_sub_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    subl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: x86_64/op_mul_int_2addr.S */
-    /* mul vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    imull   (rFP,rINSTq,4), %eax
-    SET_VREG %eax, %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: x86_64/op_div_int_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # rcx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
-    GET_WIDE_VREG %ecx, %rcx             # ecx <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vA
-    GET_VREG %ecx, %rcx                  # ecx <- vB
-    .endif
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl  $-1, %ecx
-    je      2f
-    cdq                                    # rdx:rax <- sign-extended of rax
-    idivl   %ecx
-1:
-    .if 0
-    SET_WIDE_VREG %eax, rINSTq           # vA <- result
-    .else
-    SET_VREG %eax, rINSTq                # vA <- result
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
-    .if 0
-    xorl %eax, %eax
-    .else
-    negl %eax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: x86_64/op_rem_int_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # rcx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
-    GET_WIDE_VREG %ecx, %rcx             # ecx <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vA
-    GET_VREG %ecx, %rcx                  # ecx <- vB
-    .endif
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl  $-1, %ecx
-    je      2f
-    cdq                                    # rdx:rax <- sign-extended of rax
-    idivl   %ecx
-1:
-    .if 0
-    SET_WIDE_VREG %edx, rINSTq           # vA <- result
-    .else
-    SET_VREG %edx, rINSTq                # vA <- result
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
-    .if 1
-    xorl %edx, %edx
-    .else
-    negl %edx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: x86_64/op_and_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    andl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: x86_64/op_or_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    orl     %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: x86_64/op_xor_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    xorl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: x86_64/op_shl_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    sall    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    sall    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: x86_64/op_shr_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    sarl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    sarl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: x86_64/op_ushr_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 0
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    shrl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    shrl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: x86_64/op_add_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    addq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: x86_64/op_sub_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    subq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: x86_64/op_mul_long_2addr.S */
-    /* mul vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, %rcx                # rax <- vA
-    imulq   (rFP,rINSTq,4), %rax
-    SET_WIDE_VREG %rax, %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: x86_64/op_div_long_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # rcx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
-    GET_WIDE_VREG %rcx, %rcx             # ecx <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vA
-    GET_VREG %rcx, %rcx                  # ecx <- vB
-    .endif
-    testq   %rcx, %rcx
-    jz      common_errDivideByZero
-    cmpq  $-1, %rcx
-    je      2f
-    cqo                                    # rdx:rax <- sign-extended of rax
-    idivq   %rcx
-1:
-    .if 1
-    SET_WIDE_VREG %rax, rINSTq           # vA <- result
-    .else
-    SET_VREG %rax, rINSTq                # vA <- result
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
-    .if 0
-    xorq %rax, %rax
-    .else
-    negq %rax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: x86_64/op_rem_long_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # rcx <- B
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
-    GET_WIDE_VREG %rcx, %rcx             # ecx <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vA
-    GET_VREG %rcx, %rcx                  # ecx <- vB
-    .endif
-    testq   %rcx, %rcx
-    jz      common_errDivideByZero
-    cmpq  $-1, %rcx
-    je      2f
-    cqo                                    # rdx:rax <- sign-extended of rax
-    idivq   %rcx
-1:
-    .if 1
-    SET_WIDE_VREG %rdx, rINSTq           # vA <- result
-    .else
-    SET_VREG %rdx, rINSTq                # vA <- result
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
-    .if 1
-    xorq %rdx, %rdx
-    .else
-    negq %rdx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: x86_64/op_and_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    andq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: x86_64/op_or_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    orq     %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: x86_64/op_xor_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    xorq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: x86_64/op_shl_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    salq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    salq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: x86_64/op_shr_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    sarq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    sarq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86_64/op_ushr_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    .if 1
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    shrq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    shrq    %cl, %rax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: x86_64/op_add_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    addss VREG_ADDRESS(rINSTq), %xmm0
-    movss %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: x86_64/op_sub_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    subss VREG_ADDRESS(rINSTq), %xmm0
-    movss %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: x86_64/op_mul_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    mulss VREG_ADDRESS(rINSTq), %xmm0
-    movss %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: x86_64/op_div_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    divss VREG_ADDRESS(rINSTq), %xmm0
-    movss %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movss %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: x86_64/op_rem_float_2addr.S */
-    /* rem_float/2addr vA, vB */
-    movzbq  rINSTbl, %rcx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    flds    VREG_ADDRESS(rINSTq)            # vB to fp stack
-    andb    $0xf, %cl                      # ecx <- A
-    flds    VREG_ADDRESS(%rcx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(%rcx)              # %st to vA
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: x86_64/op_add_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    addsd VREG_ADDRESS(rINSTq), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: x86_64/op_sub_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    subsd VREG_ADDRESS(rINSTq), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: x86_64/op_mul_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    mulsd VREG_ADDRESS(rINSTq), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: x86_64/op_div_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $0xf, %ecx                     # ecx <- A
-    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $4, rINST                      # rINST<- B
-    divsd VREG_ADDRESS(rINSTq), %xmm0
-    movsd %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movsd %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: x86_64/op_rem_double_2addr.S */
-    /* rem_double/2addr vA, vB */
-    movzbq  rINSTbl, %rcx                   # ecx <- A+
-    sarl    $4, rINST                      # rINST <- B
-    fldl    VREG_ADDRESS(rINSTq)            # vB to fp stack
-    andb    $0xf, %cl                      # ecx <- A
-    fldl    VREG_ADDRESS(%rcx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(%rcx)              # %st to vA
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: x86_64/op_add_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    addl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: x86_64/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    subl    %eax, %ecx                                  # for example: addl %ecx, %eax
-    SET_VREG %ecx, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: x86_64/op_mul_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    imull   %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: x86_64/op_div_int_lit16.S */
-/* File: x86_64/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl    $-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG %eax, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 0
-    xorl    %eax, %eax
-    .else
-    negl    %eax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: x86_64/op_rem_int_lit16.S */
-/* File: x86_64/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl    $-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG %edx, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 1
-    xorl    %edx, %edx
-    .else
-    negl    %edx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: x86_64/op_and_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: x86_64/op_or_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    orl     %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: x86_64/op_xor_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    xorl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: x86_64/op_add_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    addl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86_64/op_rsub_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    subl    %eax, %ecx                                  # ex: addl %ecx,%eax
-    SET_VREG %ecx, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: x86_64/op_mul_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    imull   %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: x86_64/op_div_int_lit8.S */
-/* File: x86_64/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %rax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG %eax, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 0
-    xorl    %eax, %eax
-    .else
-    negl    %eax
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: x86_64/op_rem_int_lit8.S */
-/* File: x86_64/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %rax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG %edx, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if 1
-    xorl    %edx, %edx
-    .else
-    negl    %edx
-    .endif
-    jmp     1b
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: x86_64/op_and_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    andl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: x86_64/op_or_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    orl     %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: x86_64/op_xor_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    xorl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: x86_64/op_shl_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    sall    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: x86_64/op_shr_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    sarl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86_64/op_ushr_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    shrl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 0
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: x86_64/op_iget_wide_quick.S */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 1
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movswl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: x86_64/op_iget_object_quick.S */
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    .extern artIGetObjectFromMterp
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG OUT_32_ARG0, %rcx              # vB (object we're operating on)
-    movzwl  2(rPC), OUT_32_ARG1             # eax <- field byte offset
-    EXPORT_PC
-    callq   SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
-    movq    rSELF, %rcx
-    cmpq    $0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException                  # bail out
-    andb    $0xf, rINSTbl                  # rINST <- A
-    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: x86_64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    movl    rINST, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: x86_64/op_iput_wide_quick.S */
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    movzbq    rINSTbl, %rcx                 # rcx<- BA
-    sarl      $4, %ecx                     # ecx<- B
-    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
-    testl     %ecx, %ecx                    # is object null?
-    je        common_errNullObject
-    movzwq    2(rPC), %rax                  # rax<- field byte offset
-    leaq      (%rcx,%rax,1), %rcx           # ecx<- Address of 64-bit target
-    andb      $0xf, rINSTbl                # rINST<- A
-    GET_WIDE_VREG %rax, rINSTq              # rax<- fp[A]/fp[A+1]
-    movq      %rax, (%rcx)                  # obj.field<- r0/r1
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: x86_64/op_iput_object_quick.S */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST 232
-    movl    rINST, OUT_32_ARG2
-    call    SYMBOL(MterpIputObjectQuick)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86_64/op_invoke_virtual_quick.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuick
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 233
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeVirtualQuick)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86_64/op_invoke_virtual_range_quick.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeVirtualQuickRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 234
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeVirtualQuickRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: x86_64/op_iput_boolean_quick.S */
-/* File: x86_64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    movb    rINSTbl, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: x86_64/op_iput_byte_quick.S */
-/* File: x86_64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    movb    rINSTbl, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: x86_64/op_iput_char_quick.S */
-/* File: x86_64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    movw    rINSTw, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: x86_64/op_iput_short_quick.S */
-/* File: x86_64/op_iput_quick.S */
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    movw    rINSTw, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: x86_64/op_iget_boolean_quick.S */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 0
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movsbl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: x86_64/op_iget_byte_quick.S */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 0
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movsbl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: x86_64/op_iget_char_quick.S */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 0
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movzwl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: x86_64/op_iget_short_quick.S */
-/* File: x86_64/op_iget_quick.S */
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $0xf,rINSTbl                   # rINST <- A
-    .if 0
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    movswl (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: x86_64/op_unused_f3.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: x86_64/op_unused_f4.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: x86_64/op_unused_f5.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: x86_64/op_unused_f6.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: x86_64/op_unused_f7.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: x86_64/op_unused_f8.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: x86_64/op_unused_f9.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: x86_64/op_invoke_polymorphic.S */
-/* File: x86_64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphic
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 250
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokePolymorphic)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86_64/op_invoke_polymorphic_range.S */
-/* File: x86_64/invoke_polymorphic.S */
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern MterpInvokePolymorphicRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 251
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokePolymorphicRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: x86_64/op_invoke_custom.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustom
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 252
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeCustom)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: x86_64/op_invoke_custom_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern MterpInvokeCustomRange
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST 253
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL(MterpInvokeCustomRange)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: x86_64/op_const_method_handle.S */
-/* File: x86_64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodHandle
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstMethodHandle)                 # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
-    .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: x86_64/op_const_method_type.S */
-/* File: x86_64/const.S */
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern MterpConstMethodType
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstMethodType)                 # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-    .balign 128
-/* File: x86_64/instruction_end.S */
-
-    OBJECT_TYPE(artMterpAsmInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
-    .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-
-/*
- * ===========================================================================
- *  Sister implementations
- * ===========================================================================
- */
-/* File: x86_64/instruction_start_sister.S */
-
-    OBJECT_TYPE(artMterpAsmSisterStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
-    .global SYMBOL(artMterpAsmSisterStart)
-    .text
-    .balign 4
-SYMBOL(artMterpAsmSisterStart):
-
-/* File: x86_64/instruction_end_sister.S */
-
-    OBJECT_TYPE(artMterpAsmSisterEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
-    .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
-
-/* File: x86_64/instruction_start_alt.S */
-
-    OBJECT_TYPE(artMterpAsmAltInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
-    .global SYMBOL(artMterpAsmAltInstructionStart)
-    .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(0*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(1*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(2*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(3*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(4*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(5*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(6*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(7*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(8*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(9*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(10*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(11*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(12*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(13*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(14*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(15*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(16*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(17*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(18*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(19*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(20*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(21*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(22*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(23*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(24*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(25*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(26*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(27*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(28*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(29*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(30*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(31*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(32*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(33*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(34*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(35*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(36*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(37*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(38*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(39*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(40*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(41*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(42*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(43*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(44*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(45*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(46*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(47*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(48*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(49*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(50*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(51*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(52*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(53*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(54*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(55*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(56*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(57*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(58*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(59*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(60*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(61*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(62*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(63*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(64*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(65*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(66*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(67*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(68*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(69*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(70*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(71*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(72*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(73*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(74*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(75*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(76*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(77*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(78*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(79*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(80*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(81*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(82*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(83*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(84*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(85*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(86*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(87*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(88*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(89*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(90*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(91*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(92*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(93*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(94*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(95*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(96*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(97*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(98*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(99*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(100*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(101*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(102*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(103*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(104*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(105*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(106*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(107*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(108*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(109*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(110*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(111*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(112*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(113*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(114*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(115*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(116*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(117*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(118*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(119*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(120*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(121*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(122*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(123*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(124*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(125*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(126*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(127*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(128*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(129*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(130*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(131*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(132*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(133*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(134*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(135*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(136*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(137*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(138*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(139*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(140*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(141*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(142*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(143*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(144*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(145*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(146*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(147*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(148*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(149*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(150*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(151*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(152*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(153*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(154*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(155*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(156*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(157*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(158*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(159*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(160*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(161*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(162*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(163*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(164*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(165*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(166*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(167*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(168*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(169*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(170*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(171*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(172*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(173*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(174*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(175*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(176*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(177*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(178*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(179*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(180*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(181*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(182*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(183*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(184*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(185*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(186*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(187*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(188*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(189*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(190*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(191*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(192*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(193*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(194*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(195*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(196*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(197*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(198*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(199*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(200*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(201*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(202*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(203*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(204*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(205*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(206*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(207*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(208*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(209*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(210*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(211*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(212*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(213*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(214*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(215*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(216*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(217*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(218*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(219*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(220*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(221*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(222*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(223*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(224*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(225*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(226*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(227*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(228*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(229*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(230*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(231*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(232*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(233*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(234*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(235*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(236*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(237*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(238*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(239*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(240*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(241*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(242*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(243*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(244*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(245*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(246*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(247*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(248*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(249*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(250*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(251*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(252*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(253*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(254*128)
-
-/* ------------------------------ */
-    .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(255*128)
-
-    .balign 128
-/* File: x86_64/instruction_end_alt.S */
-
-    OBJECT_TYPE(artMterpAsmAltInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
-    .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
-/* File: x86_64/footer.S */
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogDivideByZeroException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogArrayIndexException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNegativeArraySizeException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNoSuchMethodException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNullObjectException)
-#endif
-    jmp     MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogExceptionThrownException)
-#endif
-    jmp     MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
-    call    SYMBOL(MterpLogSuspendFallback)
-#endif
-    jmp     MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    movq    rSELF, %rcx
-    cmpq    $0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jz      MterpFallback
-    /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpHandleException)
-    testb   %al, %al
-    jz      MterpExceptionReturn
-    movq    OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
-    mov     OFF_FP_DEX_PC(rFP), %ecx
-    leaq    (%rax, %rcx, 2), rPC
-    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
-    /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    /* resume execution at catch block */
-    REFRESH_IBASE
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
-    jg      .L_forward_branch               # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmpl    $JIT_CHECK_OSR, rPROFILE
-    je      .L_osr_check
-    decl    rPROFILE
-    je      .L_add_batch                    # counted down to zero - report
-.L_resume_backward_branch:
-    movq    rSELF, %rax
-    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
-    REFRESH_IBASE_REG %rax
-    leaq    (rPC, rINSTq, 2), rPC
-    FETCH_INST
-    jnz     .L_suspend_request_pending
-    GOTO_NEXT
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    call    SYMBOL(MterpSuspendCheck)       # (self)
-    testb   %al, %al
-    jnz     MterpFallback
-    REFRESH_IBASE                           # might have changed during suspend
-    GOTO_NEXT
-
-.L_no_count_backwards:
-    cmpl    $JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    jne     .L_resume_backward_branch
-.L_osr_check:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_backward_branch
-    jmp     MterpOnStackReplacement
-
-.L_forward_branch:
-    cmpl    $JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    je      .L_check_osr_forward
-.L_resume_forward_branch:
-    leaq    (rPC, rINSTq, 2), rPC
-    FETCH_INST
-    GOTO_NEXT
-
-.L_check_osr_forward:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_forward_branch
-    jmp     MterpOnStackReplacement
-
-.L_add_batch:
-    movl    rPROFILE, %eax
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movswl  %ax, rPROFILE
-    jmp     .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    $2, OUT_32_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jnz     MterpOnStackReplacement
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    rINST, OUT_32_ARG2
-    call    SYMBOL(MterpLogOSR)
-#endif
-    movl    $1, %eax
-    jmp     MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    xorl    %eax, %eax
-    jmp     MterpDone
-
-/*
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    movl    $1, %eax
-    jmp     MterpDone
-MterpReturn:
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
-    movq    %rax, (%rdx)
-    movl    $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    testl   rPROFILE, rPROFILE
-    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
-
-    movl    %eax, rINST                     # stash return value
-    /* Report cached hotness counts */
-    movl    rPROFILE, %eax
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movl    rINST, %eax                     # restore return value
-
-    /* pop up frame */
-MRestoreFrame:
-    addq    $FRAME_SIZE, %rsp
-    .cfi_adjust_cfa_offset -FRAME_SIZE
-
-    /* Restore callee save register */
-    POP %r15
-    POP %r14
-    POP %r13
-    POP %r12
-    POP %rbp
-    POP %rbx
-    ret
-    .cfi_endproc
-    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
deleted file mode 100755
index ca3dcd9..0000000
--- a/runtime/interpreter/mterp/rebuild.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Rebuild for all known targets.  Necessary until the stuff in "out" gets
-# generated as part of the build.
-#
-set -e
-
-for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/interpreter/mterp/x86/alt_stub.S b/runtime/interpreter/mterp/x86/alt_stub.S
deleted file mode 100644
index a5b39b8..0000000
--- a/runtime/interpreter/mterp/x86/alt_stub.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    REFRESH_IBASE
-    jmp     .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86/arithmetic.S
new file mode 100644
index 0000000..973e5b8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/arithmetic.S
@@ -0,0 +1,943 @@
+%def bindiv(result="", special="", rem=""):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+ * op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    mov     rIBASE, LOCAL0(%esp)
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    movl    %eax, %edx
+    orl     %ecx, %edx
+    testl   $$0xFFFFFF00, %edx              # If both arguments are less
+                                            #   than 8-bit and +ve
+    jz      .L${opcode}_8                   # Do 8-bit divide
+    testl   $$0xFFFF0000, %edx              # If both arguments are less
+                                            #   than 16-bit and +ve
+    jz      .L${opcode}_16                  # Do 16-bit divide
+    cmpl    $$-1, %ecx
+    jne     .L${opcode}_32
+    cmpl    $$0x80000000, %eax
+    jne     .L${opcode}_32
+    movl    $special, $result
+    jmp     .L${opcode}_finish
+%  add_helper(lambda: bindiv_helper(result, rem))
+
+%def bindiv_helper(result, rem):
+.L${opcode}_32:
+    cltd
+    idivl   %ecx
+    jmp     .L${opcode}_finish
+.L${opcode}_8:
+    div     %cl                             # 8-bit divide otherwise.
+                                            # Remainder in %ah, quotient in %al
+    .if $rem
+    movl    %eax, %edx
+    shr     $$8, %edx
+    .else
+    andl    $$0x000000FF, %eax
+    .endif
+    jmp     .L${opcode}_finish
+.L${opcode}_16:
+    xorl    %edx, %edx                      # Clear %edx before divide
+    div     %cx
+.L${opcode}_finish:
+    SET_VREG $result, rINST
+    mov     LOCAL0(%esp), rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def bindiv2addr(result="", special=""):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+ * op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- BA
+    mov     rIBASE, LOCAL0(%esp)
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # ecx <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, rINST                    # eax <- vA
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    jne     .L${opcode}_continue_div2addr
+    cmpl    $$0x80000000, %eax
+    jne     .L${opcode}_continue_div2addr
+    movl    $special, $result
+    SET_VREG $result, rINST
+    mov     LOCAL0(%esp), rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+%  add_helper(lambda: bindiv2addr_helper(result))
+
+%def bindiv2addr_helper(result):
+.L${opcode}_continue_div2addr:
+    cltd
+    idivl   %ecx
+    SET_VREG $result, rINST
+    mov     LOCAL0(%esp), rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def bindivLit16(result="", special=""):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op0=minint and
+ * op1=-1.
+ */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movzbl  rINSTbl, %eax                   # eax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %eax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    jne     .L${opcode}_continue_div
+    cmpl    $$0x80000000, %eax
+    jne     .L${opcode}_continue_div
+    movl    $special, %eax
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_continue_div:
+    mov     rIBASE, LOCAL0(%esp)
+    cltd
+    idivl   %ecx
+    SET_VREG $result, rINST
+    mov     LOCAL0(%esp), rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def bindivLit8(result="", special=""):
+/*
+ * 32-bit div/rem "lit8" binary operation.  Handles special case of
+ * op0=minint & op1=-1
+ */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG  %eax, %eax                    # eax <- rBB
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $$0x80000000, %eax
+    jne     .L${opcode}_continue_div
+    cmpl    $$-1, %ecx
+    jne     .L${opcode}_continue_div
+    movl    $special, %eax
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_continue_div:
+    mov     rIBASE, LOCAL0(%esp)
+    cltd
+    idivl   %ecx
+    SET_VREG $result, rINST
+    mov     LOCAL0(%esp), rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop(result="%eax", instr=""):
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op VREG_ADDRESS(%ecx)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    $instr VREG_ADDRESS(%ecx), %eax
+    SET_VREG $result, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop1(result="%eax", tmp="%ecx", instr=""):
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbl  2(rPC),%eax                     # eax <- BB
+    movzbl  3(rPC),%ecx                     # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    $instr                                  # ex: addl    %ecx,%eax
+    SET_VREG $result, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %eax, rINST                    # eax <- vB
+    andb    $$0xf, %cl                      # ecx <- A
+    $instr %eax, VREG_ADDRESS(%ecx)
+    CLEAR_REF %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def binopLit16(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movzbl  rINSTbl, %eax                   # eax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %eax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    $instr                                  # for example: addl %ecx, %eax
+    SET_VREG $result, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopLit8(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG %eax, %eax                     # eax <- rBB
+    $instr                                  # ex: addl %ecx,%eax
+    SET_VREG $result, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide(instr1="", instr2=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
+    $instr1 VREG_ADDRESS(%ecx), rIBASE
+    $instr2 VREG_HIGH_ADDRESS(%ecx), %eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide2addr(instr1="", instr2=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $$4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx<- v[B+1]
+    andb    $$0xF, rINSTbl                  # rINST<- A
+    $instr1 %eax, VREG_ADDRESS(rINST)
+    $instr2 %ecx, VREG_HIGH_ADDRESS(rINST)
+    CLEAR_WIDE_REF rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def cvtfp_int(srcdouble="1", tgtlong="1"):
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.  This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+    /* float/double to int/long vA, vB */
+    movzbl  rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    .if $srcdouble
+    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
+    .else
+    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
+    .endif
+    ftst
+    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
+    movzwl  LOCAL0(%esp), %eax
+    movb    $$0xc, %ah
+    movw    %ax, LOCAL0+2(%esp)
+    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
+    andb    $$0xf, %cl                      # ecx <- A
+    .if $tgtlong
+    fistpll VREG_ADDRESS(%ecx)              # convert and store
+    .else
+    fistpl  VREG_ADDRESS(%ecx)              # convert and store
+    .endif
+    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
+    .if $tgtlong
+    movl    $$0x80000000, %eax
+    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
+    orl     VREG_ADDRESS(%ecx), %eax
+    .else
+    cmpl    $$0x80000000, VREG_ADDRESS(%ecx)
+    .endif
+    je      .L${opcode}_special_case # fix up result
+
+.L${opcode}_finish:
+    xor     %eax, %eax
+    mov     %eax, VREG_REF_ADDRESS(%ecx)
+    .if $tgtlong
+    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+%  add_helper(lambda: cvtfp_int_helper(tgtlong))
+
+%def cvtfp_int_helper(tgtlong):
+.L${opcode}_special_case:
+    fnstsw  %ax
+    sahf
+    jp      .L${opcode}_isNaN
+    adcl    $$-1, VREG_ADDRESS(%ecx)
+    .if $tgtlong
+    adcl    $$-1, VREG_HIGH_ADDRESS(%ecx)
+    .endif
+   jmp      .L${opcode}_finish
+.L${opcode}_isNaN:
+    movl    $$0, VREG_ADDRESS(%ecx)
+    .if $tgtlong
+    movl    $$0, VREG_HIGH_ADDRESS(%ecx)
+    .endif
+    jmp     .L${opcode}_finish
+
+%def shop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # ecx <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, rINST                    # eax <- vAA
+    $instr                                  # ex: sarl %cl, %eax
+    SET_VREG $result, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def unop(instr=""):
+/*
+ * Generic 32-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movzbl  rINSTbl,%ecx                    # ecx <- A+
+    sarl    $$4,rINST                       # rINST <- B
+    GET_VREG %eax, rINST                    # eax <- vB
+    andb    $$0xf,%cl                       # ecx <- A
+    $instr
+    SET_VREG %eax, %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_int():
+%  binop(instr="addl")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="addl")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="addl    %ecx, %eax")
+
+%def op_add_int_lit8():
+%  binopLit8(instr="addl    %ecx, %eax")
+
+%def op_add_long():
+%  binopWide(instr1="addl", instr2="adcl")
+
+%def op_add_long_2addr():
+%  binopWide2addr(instr1="addl", instr2="adcl")
+
+%def op_and_int():
+%  binop(instr="andl")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="andl")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="andl    %ecx, %eax")
+
+%def op_and_int_lit8():
+%  binopLit8(instr="andl    %ecx, %eax")
+
+%def op_and_long():
+%  binopWide(instr1="andl", instr2="andl")
+
+%def op_and_long_2addr():
+%  binopWide2addr(instr1="andl", instr2="andl")
+
+%def op_cmp_long():
+/*
+ * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+    /* cmp-long vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
+    cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
+    jl      .L${opcode}_smaller
+    jg      .L${opcode}_bigger
+    movzbl  2(rPC), %eax                    # eax <- BB, restore BB
+    GET_VREG %eax, %eax                     # eax <- v[BB]
+    sub     VREG_ADDRESS(%ecx), %eax
+    ja      .L${opcode}_bigger
+    jb      .L${opcode}_smaller
+.L${opcode}_finish:
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_bigger:
+    movl    $$1, %eax
+    jmp     .L${opcode}_finish
+
+.L${opcode}_smaller:
+    movl    $$-1, %eax
+    jmp     .L${opcode}_finish
+
+%def op_div_int():
+%  bindiv(result="%eax", special="$0x80000000", rem="0")
+
+%def op_div_int_2addr():
+%  bindiv2addr(result="%eax", special="$0x80000000")
+
+%def op_div_int_lit16():
+%  bindivLit16(result="%eax", special="$0x80000000")
+
+%def op_div_int_lit8():
+%  bindivLit8(result="%eax", special="$0x80000000")
+
+%def op_div_long(routine="art_quick_ldiv"):
+/* art_quick_* methods has quick abi,
+ *   so use eax, ecx, edx, ebx for args
+ */
+    /* div vAA, vBB, vCC */
+    .extern $routine
+    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
+    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
+    movzbl  3(rPC), %eax                    # eax <- CC
+    GET_VREG %ecx, %eax
+    GET_VREG_HIGH %ebx, %eax
+    movl    %ecx, %edx
+    orl     %ebx, %ecx
+    jz      common_errDivideByZero
+    movzbl  2(rPC), %eax                    # eax <- BB
+    GET_VREG_HIGH %ecx, %eax
+    GET_VREG %eax, %eax
+    call    SYMBOL($routine)
+    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
+    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_div_long_2addr(routine="art_quick_ldiv"):
+/* art_quick_* methods has quick abi,
+ *   so use eax, ecx, edx, ebx for args
+ */
+    /* div/2addr vA, vB */
+    .extern   $routine
+    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
+    movzbl  rINSTbl, %eax
+    shrl    $$4, %eax                       # eax <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
+    movl    %ebx, %ecx
+    GET_VREG %edx, %eax
+    GET_VREG_HIGH %ebx, %eax
+    movl    %edx, %eax
+    orl     %ebx, %eax
+    jz      common_errDivideByZero
+    GET_VREG %eax, %ecx
+    GET_VREG_HIGH %ecx, %ecx
+    call    SYMBOL($routine)
+    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
+    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_int_to_byte():
+%  unop(instr="movsbl  %al, %eax")
+
+%def op_int_to_char():
+%  unop(instr="movzwl  %ax,%eax")
+
+%def op_int_to_long():
+    /* int to long vA, vB */
+    movzbl  rINSTbl, %eax                   # eax <- +A
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %eax                     # eax <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
+    cltd                                    # rINST:eax<- sssssssBBBBBBBB
+    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
+    SET_VREG %eax, rINST                    # v[A+0] <- %eax
+    movl    %ecx, rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_int_to_short():
+%  unop(instr="movswl %ax, %eax")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+    /*
+     * 32-bit binary multiplication.
+     */
+    /* mul vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    mov     rIBASE, LOCAL0(%esp)
+    imull   VREG_ADDRESS(%ecx), %eax        # trashes rIBASE/edx
+    mov     LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_int_2addr():
+    /* mul vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %eax, rINST                    # eax <- vB
+    andb    $$0xf, %cl                      # ecx <- A
+    movl    rIBASE, rINST
+    imull   VREG_ADDRESS(%ecx), %eax        # trashes rIBASE/edx
+    movl    rINST, rIBASE
+    SET_VREG %eax, %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_mul_int_lit16():
+    /* mul/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movzbl  rINSTbl, %eax                   # eax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %eax                     # eax <- vB
+    movl    rIBASE, %ecx
+    movswl  2(rPC), rIBASE                  # rIBASE <- ssssCCCC
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    imull   rIBASE, %eax                    # trashes rIBASE/edx
+    movl    %ecx, rIBASE
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_int_lit8():
+    /* mul/lit8 vAA, vBB, #+CC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movl    rIBASE, %ecx
+    GET_VREG  %eax, %eax                    # eax <- rBB
+    movsbl  3(rPC), rIBASE                  # rIBASE <- ssssssCC
+    imull   rIBASE, %eax                    # trashes rIBASE/edx
+    movl    %ecx, rIBASE
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_long():
+/*
+ * Signed 64-bit integer multiply.
+ *
+ * We could definitely use more free registers for
+ * this code.   We spill rINSTw (ebx),
+ * giving us eax, ebx, ecx and edx as computational
+ * temps.  On top of that, we'll spill edi (rFP)
+ * for use as the vB pointer and esi (rPC) for use
+ * as the vC pointer.  Yuck.
+ *
+ */
+    /* mul-long vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- B
+    movzbl  3(rPC), %ecx                    # ecx <- C
+    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
+    mov     rFP, LOCAL1(%esp)               # save FP
+    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
+    leal    (rFP,%eax,4), %esi              # esi <- &v[B]
+    leal    VREG_ADDRESS(%ecx), rFP         # rFP <- &v[C]
+    movl    4(%esi), %ecx                   # ecx <- Bmsw
+    imull   (rFP), %ecx                     # ecx <- (Bmsw*Clsw)
+    movl    4(rFP), %eax                    # eax <- Cmsw
+    imull   (%esi), %eax                    # eax <- (Cmsw*Blsw)
+    addl    %eax, %ecx                      # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
+    movl    (rFP), %eax                     # eax <- Clsw
+    mull    (%esi)                          # eax <- (Clsw*Blsw)
+    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
+    mov     LOCAL1(%esp), rFP               # restore FP
+    leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    mov     LOCAL2(%esp), rIBASE            # restore IBASE
+    SET_VREG %eax, rINST                    # v[AA] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_long_2addr():
+/*
+ * Signed 64-bit integer multiply, 2-addr version
+ *
+ * We could definitely use more free registers for
+ * this code.  We must spill %edx (rIBASE) because it
+ * is used by imul.  We'll also spill rINST (ebx),
+ * giving us eax, ebx, ecx and rIBASE as computational
+ * temps.  On top of that, we'll spill rPC (esi)
+ * for use as the vA pointer and rFP (edi) for use
+ * as the vB pointer.  Yuck.
+ */
+    /* mul-long/2addr vA, vB */
+    movzbl  rINSTbl, %eax                   # eax <- BA
+    andb    $$0xf, %al                      # eax <- A
+    CLEAR_WIDE_REF %eax                     # clear refs in advance
+    sarl    $$4, rINST                      # rINST <- B
+    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
+    mov     rFP, LOCAL1(%esp)               # save FP
+    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
+    leal    (rFP,%eax,4), %esi              # esi <- &v[A]
+    leal    (rFP,rINST,4), rFP              # rFP <- &v[B]
+    movl    4(%esi), %ecx                   # ecx <- Amsw
+    imull   (rFP), %ecx                     # ecx <- (Amsw*Blsw)
+    movl    4(rFP), %eax                    # eax <- Bmsw
+    imull   (%esi), %eax                    # eax <- (Bmsw*Alsw)
+    addl    %eax, %ecx                      # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
+    movl    (rFP), %eax                     # eax <- Blsw
+    mull    (%esi)                          # eax <- (Blsw*Alsw)
+    leal    (%ecx,rIBASE), rIBASE           # full result now in %edx:%eax
+    movl    rIBASE, 4(%esi)                 # v[A+1] <- rIBASE
+    movl    %eax, (%esi)                    # v[A] <- %eax
+    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
+    mov     LOCAL2(%esp), rIBASE            # restore IBASE
+    mov     LOCAL1(%esp), rFP               # restore FP
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_neg_int():
+%  unop(instr="negl    %eax")
+
+%def op_neg_long():
+    /* unop vA, vB */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
+    negl    %eax
+    adcl    $$0, %ecx
+    negl    %ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_not_int():
+%  unop(instr="notl %eax")
+
+%def op_not_long():
+    /* unop vA, vB */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
+    notl    %eax
+    notl    %ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_or_int():
+%  binop(instr="orl")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="orl")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="orl     %ecx, %eax")
+
+%def op_or_int_lit8():
+%  binopLit8(instr="orl     %ecx, %eax")
+
+%def op_or_long():
+%  binopWide(instr1="orl", instr2="orl")
+
+%def op_or_long_2addr():
+%  binopWide2addr(instr1="orl", instr2="orl")
+
+%def op_rem_int():
+%  bindiv(result="rIBASE", special="$0", rem="1")
+
+%def op_rem_int_2addr():
+%  bindiv2addr(result="rIBASE", special="$0")
+
+%def op_rem_int_lit16():
+%  bindivLit16(result="rIBASE", special="$0")
+
+%def op_rem_int_lit8():
+%  bindivLit8(result="rIBASE", special="$0")
+
+%def op_rem_long():
+%  op_div_long(routine="art_quick_lmod")
+
+%def op_rem_long_2addr():
+%  op_div_long_2addr(routine="art_quick_lmod")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%  binopLit16(instr="subl    %eax, %ecx", result="%ecx")
+
+%def op_rsub_int_lit8():
+%  binopLit8(instr="subl    %eax, %ecx", result="%ecx")
+
+%def op_shl_int():
+%  binop1(instr="sall    %cl, %eax")
+
+%def op_shl_int_2addr():
+%  shop2addr(instr="sall    %cl, %eax")
+
+%def op_shl_int_lit8():
+%  binopLit8(instr="sall    %cl, %eax")
+
+%def op_shl_long():
+/*
+ * Long integer shift.  This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.  x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+    /* shl-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rINST */
+    /* rINSTw gets AA */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
+    shldl   %eax,rIBASE
+    sall    %cl, %eax
+    testb   $$32, %cl
+    je      2f
+    movl    %eax, rIBASE
+    xorl    %eax, %eax
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_shl_long_2addr():
+/*
+ * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* shl-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    sarl    $$4, %ecx                       # ecx <- B
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
+    shldl   %eax, rIBASE
+    sall    %cl, %eax
+    testb   $$32, %cl
+    je      2f
+    movl    %eax, rIBASE
+    xorl    %eax, %eax
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_shr_int():
+%  binop1(instr="sarl    %cl, %eax")
+
+%def op_shr_int_2addr():
+%  shop2addr(instr="sarl    %cl, %eax")
+
+%def op_shr_int_lit8():
+%  binopLit8(instr="sarl    %cl, %eax")
+
+%def op_shr_long():
+/*
+ * Long integer shift.  This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.  x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+    /* shr-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
+    shrdl   rIBASE, %eax
+    sarl    %cl, rIBASE
+    testb   $$32, %cl
+    je      2f
+    movl    rIBASE, %eax
+    sarl    $$31, rIBASE
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_shr_long_2addr():
+/*
+ * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* shl-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    sarl    $$4, %ecx                       # ecx <- B
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
+    shrdl   rIBASE, %eax
+    sarl    %cl, rIBASE
+    testb   $$32, %cl
+    je      2f
+    movl    rIBASE, %eax
+    sarl    $$31, rIBASE
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_int():
+%  binop(instr="subl")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="subl")
+
+%def op_sub_long():
+%  binopWide(instr1="subl", instr2="sbbl")
+
+%def op_sub_long_2addr():
+%  binopWide2addr(instr1="subl", instr2="sbbl")
+
+%def op_ushr_int():
+%  binop1(instr="shrl    %cl, %eax")
+
+%def op_ushr_int_2addr():
+%  shop2addr(instr="shrl    %cl, %eax")
+
+%def op_ushr_int_lit8():
+%  binopLit8(instr="shrl    %cl, %eax")
+
+%def op_ushr_long():
+/*
+ * Long integer shift.  This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.  x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+    /* shr-long vAA, vBB, vCC */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
+    shrdl   rIBASE, %eax
+    shrl    %cl, rIBASE
+    testb   $$32, %cl
+    je      2f
+    movl    rIBASE, %eax
+    xorl    rIBASE, rIBASE
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_ushr_long_2addr():
+/*
+ * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* shl-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill rIBASE */
+    /* rINSTw gets AA */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    sarl    $$4, %ecx                       # ecx <- B
+    movl    rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
+    shrdl   rIBASE, %eax
+    shrl    %cl, rIBASE
+    testb   $$32, %cl
+    je      2f
+    movl    rIBASE, %eax
+    xorl    rIBASE, rIBASE
+2:
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_xor_int():
+%  binop(instr="xorl")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="xorl")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="xorl    %ecx, %eax")
+
+%def op_xor_int_lit8():
+%  binopLit8(instr="xorl    %ecx, %eax")
+
+%def op_xor_long():
+%  binopWide(instr1="xorl", instr2="xorl")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(instr1="xorl", instr2="xorl")
diff --git a/runtime/interpreter/mterp/x86/array.S b/runtime/interpreter/mterp/x86/array.S
new file mode 100644
index 0000000..de846a4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/array.S
@@ -0,0 +1,215 @@
+%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    $load   $data_offset(%eax,%ecx,$shift), %eax
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_boolean():
+%  op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+/*
+ * Array object get.  vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+    /* op vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+    EXPORT_PC
+    movl    %eax, OUT_ARG0(%esp)
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
+    movl    rSELF, %ecx
+    RESTORE_IBASE_FROM_SELF %ecx
+    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+    jnz     MterpException
+    SET_VREG_OBJECT %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_short():
+%  op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+/*
+ * Array get, 64 bits.  vAA <- vBB[vCC].
+ */
+    /* aget-wide vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
+    movq    (%eax), %xmm0                   # xmm0 <- vBB[vCC]
+    SET_WIDE_FP_VREG %xmm0, rINST           # vAA <- xmm0
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    leal    $data_offset(%eax,%ecx,$shift), %eax
+    GET_VREG rINST, rINST
+    $store  $reg, (%eax)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_boolean():
+%  op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+/*
+ * Store an object into an array.  vBB[vCC] <- vAA.
+ */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rPC, OUT_ARG1(%esp)
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, pc, inst)
+    RESTORE_IBASE
+    testb   %al, %al
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_short():
+%  op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+/*
+ * Array put, 64 bits.  vBB[vCC] <- vAA.
+ *
+ */
+    /* aput-wide vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- vAA
+    movq    %xmm0, (%eax)                   # vBB[vCC] <- xmm0
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_array_length():
+/*
+ * Return the length of an array.
+ */
+    mov     rINST, %eax                     # eax <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %ecx, rINST                    # ecx <- vB (object ref)
+    testl   %ecx, %ecx                      # is null?
+    je      common_errNullObject
+    andb    $$0xf, %al                      # eax <- A
+    movl    MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
+    SET_VREG rINST, %eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
+    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
+    GET_VREG %eax, rINST                    # eax <- vAA (array object)
+    movl    %eax, OUT_ARG0(%esp)
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
+    REFRESH_IBASE
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern $helper
+    EXPORT_PC
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rPC, OUT_ARG1(%esp)
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG2(%esp)
+    call    SYMBOL($helper)
+    REFRESH_IBASE
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rPC, OUT_ARG1(%esp)
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_ARG2(%esp)
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG3(%esp)
+    call    SYMBOL(MterpNewArray)
+    RESTORE_IBASE
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bincmp.S b/runtime/interpreter/mterp/x86/bincmp.S
deleted file mode 100644
index ee32278..0000000
--- a/runtime/interpreter/mterp/x86/bincmp.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %ecx                     # eax <- vA
-    sarl    $$4, rINST                      # rINST <- B
-    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    j${revcmp}   1f
-    movswl  2(rPC), rINST                   # Get signed branch offset
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv.S b/runtime/interpreter/mterp/x86/bindiv.S
deleted file mode 100644
index e87ba45..0000000
--- a/runtime/interpreter/mterp/x86/bindiv.S
+++ /dev/null
@@ -1,48 +0,0 @@
-%default {"result":"","special":"","rem":""}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    mov     rIBASE, LOCAL0(%esp)
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    movl    %eax, %edx
-    orl     %ecx, %edx
-    testl   $$0xFFFFFF00, %edx              # If both arguments are less
-                                            #   than 8-bit and +ve
-    jz      .L${opcode}_8                   # Do 8-bit divide
-    testl   $$0xFFFF0000, %edx              # If both arguments are less
-                                            #   than 16-bit and +ve
-    jz      .L${opcode}_16                  # Do 16-bit divide
-    cmpl    $$-1, %ecx
-    jne     .L${opcode}_32
-    cmpl    $$0x80000000, %eax
-    jne     .L${opcode}_32
-    movl    $special, $result
-    jmp     .L${opcode}_finish
-.L${opcode}_32:
-    cltd
-    idivl   %ecx
-    jmp     .L${opcode}_finish
-.L${opcode}_8:
-    div     %cl                             # 8-bit divide otherwise.
-                                            # Remainder in %ah, quotient in %al
-    .if $rem
-    movl    %eax, %edx
-    shr     $$8, %edx
-    .else
-    andl    $$0x000000FF, %eax
-    .endif
-    jmp     .L${opcode}_finish
-.L${opcode}_16:
-    xorl    %edx, %edx                      # Clear %edx before divide
-    div     %cx
-.L${opcode}_finish:
-    SET_VREG $result, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv2addr.S b/runtime/interpreter/mterp/x86/bindiv2addr.S
deleted file mode 100644
index e620996..0000000
--- a/runtime/interpreter/mterp/x86/bindiv2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    mov     rIBASE, LOCAL0(%esp)
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $$-1, %ecx
-    jne     .L${opcode}_continue_div2addr
-    cmpl    $$0x80000000, %eax
-    jne     .L${opcode}_continue_div2addr
-    movl    $special, $result
-    SET_VREG $result, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.L${opcode}_continue_div2addr:
-    cltd
-    idivl   %ecx
-    SET_VREG $result, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/bindivLit16.S b/runtime/interpreter/mterp/x86/bindivLit16.S
deleted file mode 100644
index be094ae..0000000
--- a/runtime/interpreter/mterp/x86/bindivLit16.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op0=minint and
- * op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $$-1, %ecx
-    jne     .L${opcode}_continue_div
-    cmpl    $$0x80000000, %eax
-    jne     .L${opcode}_continue_div
-    movl    $special, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG $result, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindivLit8.S b/runtime/interpreter/mterp/x86/bindivLit8.S
deleted file mode 100644
index fddb545..0000000
--- a/runtime/interpreter/mterp/x86/bindivLit8.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %eax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $$0x80000000, %eax
-    jne     .L${opcode}_continue_div
-    cmpl    $$-1, %ecx
-    jne     .L${opcode}_continue_div
-    movl    $special, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_continue_div:
-    mov     rIBASE, LOCAL0(%esp)
-    cltd
-    idivl   %ecx
-    SET_VREG $result, rINST
-    mov     LOCAL0(%esp), rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop.S b/runtime/interpreter/mterp/x86/binop.S
deleted file mode 100644
index d895235..0000000
--- a/runtime/interpreter/mterp/x86/binop.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    $instr                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG $result, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop1.S b/runtime/interpreter/mterp/x86/binop1.S
deleted file mode 100644
index 5049bb3..0000000
--- a/runtime/interpreter/mterp/x86/binop1.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"result":"%eax","tmp":"%ecx"}
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    $instr                                  # ex: addl    %ecx,%eax
-    SET_VREG $result, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop2addr.S b/runtime/interpreter/mterp/x86/binop2addr.S
deleted file mode 100644
index f126234..0000000
--- a/runtime/interpreter/mterp/x86/binop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $$0xf, %cl                      # ecx <- A
-    $instr                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/binopLit16.S b/runtime/interpreter/mterp/x86/binopLit16.S
deleted file mode 100644
index 2fd59de..0000000
--- a/runtime/interpreter/mterp/x86/binopLit16.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    $instr                                  # for example: addl %ecx, %eax
-    SET_VREG $result, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopLit8.S b/runtime/interpreter/mterp/x86/binopLit8.S
deleted file mode 100644
index 67cead2..0000000
--- a/runtime/interpreter/mterp/x86/binopLit8.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax, %eax                     # eax <- rBB
-    $instr                                  # ex: addl %ecx,%eax
-    SET_VREG $result, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide.S b/runtime/interpreter/mterp/x86/binopWide.S
deleted file mode 100644
index da1293d..0000000
--- a/runtime/interpreter/mterp/x86/binopWide.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
-    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
-    $instr1                                 # ex: addl   (rFP,%ecx,4),rIBASE
-    $instr2                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide2addr.S b/runtime/interpreter/mterp/x86/binopWide2addr.S
deleted file mode 100644
index da816f4..0000000
--- a/runtime/interpreter/mterp/x86/binopWide2addr.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx<- BA
-    sarl    $$4, %ecx                       # ecx<- B
-    GET_VREG %eax, %ecx                     # eax<- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
-    andb    $$0xF, rINSTbl                  # rINST<- A
-    $instr1                                 # ex: addl   %eax,(rFP,rINST,4)
-    $instr2                                 # ex: adcl   %ecx,4(rFP,rINST,4)
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/const.S b/runtime/interpreter/mterp/x86/const.S
deleted file mode 100644
index f0cac1a..0000000
--- a/runtime/interpreter/mterp/x86/const.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL($helper)                 # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/control_flow.S b/runtime/interpreter/mterp/x86/control_flow.S
new file mode 100644
index 0000000..74b4fad
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/control_flow.S
@@ -0,0 +1,219 @@
+%def bincmp(revcmp=""):
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG %eax, %ecx                     # eax <- vA
+    sarl    $$4, rINST                      # rINST <- B
+    cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
+    j${revcmp}   1f
+    movswl  2(rPC), rINST                   # Get signed branch offset
+    testl   rINST, rINST
+    jmp     MterpCommonTakenBranch
+1:
+    cmpw    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_check_not_taken_osr
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def zcmp(revcmp=""):
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $$0, VREG_ADDRESS(rINST)        # compare (vA, 0)
+    j${revcmp}   1f
+    movswl  2(rPC), rINST                   # fetch signed displacement
+    testl   rINST, rINST
+    jmp     MterpCommonTakenBranch
+1:
+    cmpw    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_check_not_taken_osr
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_goto():
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto +AA */
+    movsbl  rINSTbl, rINST                  # rINST <- ssssssAA
+    testl   rINST, rINST
+    jmp     MterpCommonTakenBranch
+
+%def op_goto_16():
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto/16 +AAAA */
+    movswl  2(rPC), rINST                   # rINST <- ssssAAAA
+    testl   rINST, rINST
+    jmp     MterpCommonTakenBranch
+
+%def op_goto_32():
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".  Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+    /* goto/32 +AAAAAAAA */
+    movl    2(rPC), rINST                   # rINST <- AAAAAAAA
+    testl   rINST, rINST
+    jmp     MterpCommonTakenBranch
+
+%def op_if_eq():
+%  bincmp(revcmp="ne")
+
+%def op_if_eqz():
+%  zcmp(revcmp="ne")
+
+%def op_if_ge():
+%  bincmp(revcmp="l")
+
+%def op_if_gez():
+%  zcmp(revcmp="l")
+
+%def op_if_gt():
+%  bincmp(revcmp="le")
+
+%def op_if_gtz():
+%  zcmp(revcmp="le")
+
+%def op_if_le():
+%  bincmp(revcmp="g")
+
+%def op_if_lez():
+%  zcmp(revcmp="g")
+
+%def op_if_lt():
+%  bincmp(revcmp="ge")
+
+%def op_if_ltz():
+%  zcmp(revcmp="ge")
+
+%def op_if_ne():
+%  bincmp(revcmp="e")
+
+%def op_if_nez():
+%  zcmp(revcmp="e")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
+    GET_VREG %eax, rINST                    # eax <- vAA
+    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
+    movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
+    movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
+    call    SYMBOL($func)
+    REFRESH_IBASE
+    testl   %eax, %eax
+    movl    %eax, rINST
+    jmp     MterpCommonTakenBranch
+
+%def op_return():
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movl    rSELF, %eax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    jz      1f
+    movl    %eax, OUT_ARG0(%esp)
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINST                    # eax <- vAA
+    xorl    %ecx, %ecx
+    jmp     MterpReturn
+
+%def op_return_object():
+%  op_return()
+
+%def op_return_void():
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movl    rSELF, %eax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    jz      1f
+    movl    %eax, OUT_ARG0(%esp)
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorl    %eax, %eax
+    xorl    %ecx, %ecx
+    jmp     MterpReturn
+
+%def op_return_void_no_barrier():
+    movl    rSELF, %eax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    jz      1f
+    movl    %eax, OUT_ARG0(%esp)
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorl    %eax, %eax
+    xorl    %ecx, %ecx
+    jmp     MterpReturn
+
+%def op_return_wide():
+/*
+ * Return a 64-bit value.
+ */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movl    rSELF, %eax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    jz      1f
+    movl    %eax, OUT_ARG0(%esp)
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    GET_VREG_HIGH %ecx, rINST               # ecx <- v[AA+1]
+    jmp     MterpReturn
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+/*
+ * Throw an exception object in the current thread.
+ */
+    /* throw vAA */
+    EXPORT_PC
+    GET_VREG %eax, rINST                    # eax<- vAA (exception object)
+    testl   %eax, %eax
+    jz      common_errNullObject
+    movl    rSELF,%ecx
+    movl    %eax, THREAD_EXCEPTION_OFFSET(%ecx)
+    jmp     MterpException
diff --git a/runtime/interpreter/mterp/x86/cvtfp_int.S b/runtime/interpreter/mterp/x86/cvtfp_int.S
deleted file mode 100644
index a8bad63..0000000
--- a/runtime/interpreter/mterp/x86/cvtfp_int.S
+++ /dev/null
@@ -1,61 +0,0 @@
-%default {"srcdouble":"1","tgtlong":"1"}
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.  This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
-    /* float/double to int/long vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    .if $srcdouble
-    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .else
-    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
-    .endif
-    ftst
-    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
-    movzwl  LOCAL0(%esp), %eax
-    movb    $$0xc, %ah
-    movw    %ax, LOCAL0+2(%esp)
-    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
-    andb    $$0xf, %cl                      # ecx <- A
-    .if $tgtlong
-    fistpll VREG_ADDRESS(%ecx)              # convert and store
-    .else
-    fistpl  VREG_ADDRESS(%ecx)              # convert and store
-    .endif
-    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
-    .if $tgtlong
-    movl    $$0x80000000, %eax
-    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
-    orl     VREG_ADDRESS(%ecx), %eax
-    .else
-    cmpl    $$0x80000000, VREG_ADDRESS(%ecx)
-    .endif
-    je      .L${opcode}_special_case # fix up result
-
-.L${opcode}_finish:
-    xor     %eax, %eax
-    mov     %eax, VREG_REF_ADDRESS(%ecx)
-    .if $tgtlong
-    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.L${opcode}_special_case:
-    fnstsw  %ax
-    sahf
-    jp      .L${opcode}_isNaN
-    adcl    $$-1, VREG_ADDRESS(%ecx)
-    .if $tgtlong
-    adcl    $$-1, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-   jmp      .L${opcode}_finish
-.L${opcode}_isNaN:
-    movl    $$0, VREG_ADDRESS(%ecx)
-    .if $tgtlong
-    movl    $$0, VREG_HIGH_ADDRESS(%ecx)
-    .endif
-    jmp     .L${opcode}_finish
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
deleted file mode 100644
index 939dc61..0000000
--- a/runtime/interpreter/mterp/x86/entry.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
-    .global SYMBOL(ExecuteMterpImpl)
-    FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- *  0  Thread* self
- *  1  insns_
- *  2  ShadowFrame
- *  3  JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
-    .cfi_startproc
-    .cfi_def_cfa esp, 4
-
-    /* Spill callee save regs */
-    PUSH    %ebp
-    PUSH    %edi
-    PUSH    %esi
-    PUSH    %ebx
-
-    /* Allocate frame */
-    subl    $$FRAME_SIZE, %esp
-    .cfi_adjust_cfa_offset FRAME_SIZE
-
-    /* Load ShadowFrame pointer */
-    movl    IN_ARG2(%esp), %edx
-
-    /* Remember the return register */
-    movl    IN_ARG3(%esp), %eax
-    movl    %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
-    /* Remember the code_item */
-    movl    IN_ARG1(%esp), %ecx
-    movl    %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
-    /* set up "named" registers */
-    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
-    leal    SHADOWFRAME_VREGS_OFFSET(%edx), rFP
-    leal    (rFP, %eax, 4), rREFS
-    movl    SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
-    lea     (%ecx, %eax, 2), rPC
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Set up for backwards branches & osr profiling */
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpSetUpHotnessCountdown)
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86/fallback.S b/runtime/interpreter/mterp/x86/fallback.S
deleted file mode 100644
index 8d61166..0000000
--- a/runtime/interpreter/mterp/x86/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    jmp     MterpFallback
-
diff --git a/runtime/interpreter/mterp/x86/field.S b/runtime/interpreter/mterp/x86/field.S
deleted file mode 100644
index 8432c74..0000000
--- a/runtime/interpreter/mterp/x86/field.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { }
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern $helper
-    REFRESH_INST ${opnum}                   # fix rINST to include opcode
-    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
-    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
-    call    SYMBOL($helper)
-    testb   %al, %al
-    jz      MterpPossibleException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86/floating_point.S
new file mode 100644
index 0000000..bc7c59d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/floating_point.S
@@ -0,0 +1,236 @@
+%def fpcmp(suff="d", nanval="pos"):
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbl  3(rPC), %ecx                    # ecx<- CC
+    movzbl  2(rPC), %eax                    # eax<- BB
+    GET_VREG_XMM${suff} %xmm0, %eax
+    xor     %eax, %eax
+    ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
+    jp      .L${opcode}_nan_is_${nanval}
+    je      .L${opcode}_finish
+    jb      .L${opcode}_less
+.L${opcode}_nan_is_pos:
+    incl    %eax
+    jmp     .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+    decl    %eax
+.L${opcode}_finish:
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def fpcvt(instr="", load="", store="", wide="0"):
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+    /* unop vA, vB */
+    movzbl  rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    $load   VREG_ADDRESS(rINST)             # %st0 <- vB
+    andb    $$0xf, %cl                      # ecx <- A
+    $instr
+    $store  VREG_ADDRESS(%ecx)              # vA <- %st0
+    .if $wide
+    CLEAR_WIDE_REF %ecx
+    .else
+    CLEAR_REF %ecx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def sseBinop(instr="", suff=""):
+    movzbl  2(rPC), %ecx                    # ecx <- BB
+    movzbl  3(rPC), %eax                    # eax <- CC
+    GET_VREG_XMM${suff} %xmm0, %ecx         # %xmm0 <- 1st src
+    ${instr}${suff} VREG_ADDRESS(%eax), %xmm0
+    SET_VREG_XMM${suff} %xmm0, rINST        # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff}   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def sseBinop2Addr(instr="", suff=""):
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    andl    $$0xf, %ecx                     # ecx <- A
+    GET_VREG_XMM${suff} %xmm0, %ecx         # %xmm0 <- 1st src
+    sarl    $$4, rINST                      # rINST<- B
+    ${instr}${suff} VREG_ADDRESS(rINST), %xmm0
+    SET_VREG_XMM${suff} %xmm0, %ecx         # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff} %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_double():
+%  sseBinop(instr="adds", suff="d")
+
+%def op_add_double_2addr():
+%  sseBinop2Addr(instr="adds", suff="d")
+
+%def op_add_float():
+%  sseBinop(instr="adds", suff="s")
+
+%def op_add_float_2addr():
+%  sseBinop2Addr(instr="adds", suff="s")
+
+%def op_cmpg_double():
+%  fpcmp(suff="d", nanval="pos")
+
+%def op_cmpg_float():
+%  fpcmp(suff="s", nanval="pos")
+
+%def op_cmpl_double():
+%  fpcmp(suff="d", nanval="neg")
+
+%def op_cmpl_float():
+%  fpcmp(suff="s", nanval="neg")
+
+%def op_div_double():
+%  sseBinop(instr="divs", suff="d")
+
+%def op_div_double_2addr():
+%  sseBinop2Addr(instr="divs", suff="d")
+
+%def op_div_float():
+%  sseBinop(instr="divs", suff="s")
+
+%def op_div_float_2addr():
+%  sseBinop2Addr(instr="divs", suff="s")
+
+%def op_double_to_float():
+%  fpcvt(load="fldl", store="fstps")
+
+%def op_double_to_int():
+%  cvtfp_int(srcdouble="1", tgtlong="0")
+
+%def op_double_to_long():
+%  cvtfp_int(srcdouble="1", tgtlong="1")
+
+%def op_float_to_double():
+%  fpcvt(load="flds", store="fstpl", wide="1")
+
+%def op_float_to_int():
+%  cvtfp_int(srcdouble="0", tgtlong="0")
+
+%def op_float_to_long():
+%  cvtfp_int(srcdouble="0", tgtlong="1")
+
+%def op_int_to_double():
+%  fpcvt(load="fildl", store="fstpl", wide="1")
+
+%def op_int_to_float():
+%  fpcvt(load="fildl", store="fstps")
+
+%def op_long_to_double():
+%  fpcvt(load="fildll", store="fstpl", wide="1")
+
+%def op_long_to_float():
+%  fpcvt(load="fildll", store="fstps")
+
+%def op_mul_double():
+%  sseBinop(instr="muls", suff="d")
+
+%def op_mul_double_2addr():
+%  sseBinop2Addr(instr="muls", suff="d")
+
+%def op_mul_float():
+%  sseBinop(instr="muls", suff="s")
+
+%def op_mul_float_2addr():
+%  sseBinop2Addr(instr="muls", suff="s")
+
+%def op_neg_double():
+%  fpcvt(instr="fchs", load="fldl", store="fstpl", wide="1")
+
+%def op_neg_float():
+%  fpcvt(instr="fchs", load="flds", store="fstps")
+
+%def op_rem_double():
+    /* rem_double vAA, vBB, vCC */
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movzbl  2(rPC), %eax                    # eax <- BB
+    fldl    VREG_ADDRESS(%ecx)              # %st1 <- fp[vCC] (divisor)
+    fldl    VREG_ADDRESS(%eax)              # %st0 <- fp[vBB] (dividend)
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(rINST)             # fp[vAA] <- %st
+    CLEAR_WIDE_REF rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_double_2addr():
+    /* rem_double/2addr vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    fldl    VREG_ADDRESS(rINST)             # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    fldl    VREG_ADDRESS(%ecx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(%ecx)              # %st to vA
+    CLEAR_WIDE_REF %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_rem_float():
+    /* rem_float vAA, vBB, vCC */
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movzbl  2(rPC), %eax                    # eax <- BB
+    flds    VREG_ADDRESS(%ecx)              # vCC (divisor) to fp stack
+    flds    VREG_ADDRESS(%eax)              # vBB (dividend) to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(rINST)             # %st to vAA
+    CLEAR_REF rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_float_2addr():
+    /* rem_float/2addr vA, vB */
+    movzx   rINSTbl, %ecx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    flds    VREG_ADDRESS(rINST)             # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    flds    VREG_ADDRESS(%ecx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(%ecx)              # %st to vA
+    CLEAR_REF %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_double():
+%  sseBinop(instr="subs", suff="d")
+
+%def op_sub_double_2addr():
+%  sseBinop2Addr(instr="subs", suff="d")
+
+%def op_sub_float():
+%  sseBinop(instr="subs", suff="s")
+
+%def op_sub_float_2addr():
+%  sseBinop2Addr(instr="subs", suff="s")
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
deleted file mode 100644
index 0b08cf9..0000000
--- a/runtime/interpreter/mterp/x86/footer.S
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogDivideByZeroException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogArrayIndexException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNegativeArraySizeException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNoSuchMethodException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogNullObjectException)
-#endif
-    jmp     MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    call    SYMBOL(MterpLogExceptionThrownException)
-#endif
-    jmp     MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    THREAD_FLAGS_OFFSET(%eax), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpLogSuspendFallback)
-#endif
-    jmp     MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    movl    rSELF, %eax
-    testl   $$-1, THREAD_EXCEPTION_OFFSET(%eax)
-    jz      MterpFallback
-    /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpHandleException)
-    testb   %al, %al
-    jz      MterpExceptionReturn
-    movl    OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
-    movl    OFF_FP_DEX_PC(rFP), %ecx
-    lea     (%eax, %ecx, 2), rPC
-    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
-    /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    /* resume execution at catch block */
-    REFRESH_IBASE
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
-    jg      .L_forward_branch               # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmpw    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_osr_check
-    decw    rPROFILE
-    je      .L_add_batch                    # counted down to zero - report
-.L_resume_backward_branch:
-    movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    leal    (rPC, rINST, 2), rPC
-    FETCH_INST
-    jnz     .L_suspend_request_pending
-    REFRESH_IBASE
-    GOTO_NEXT
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    movl    %eax, OUT_ARG0(%esp)            # rSELF in eax
-    call    SYMBOL(MterpSuspendCheck)       # (self)
-    testb   %al, %al
-    jnz     MterpFallback
-    REFRESH_IBASE                           # might have changed during suspend
-    GOTO_NEXT
-
-.L_no_count_backwards:
-    cmpw    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    jne     .L_resume_backward_branch
-.L_osr_check:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_backward_branch
-    jmp     MterpOnStackReplacement
-
-.L_forward_branch:
-    cmpw    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    je      .L_check_osr_forward
-.L_resume_forward_branch:
-    leal    (rPC, rINST, 2), rPC
-    FETCH_INST
-    GOTO_NEXT
-
-.L_check_osr_forward:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    REFRESH_IBASE
-    jz      .L_resume_forward_branch
-    jmp     MterpOnStackReplacement
-
-.L_add_batch:
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    jmp     .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    EXPORT_PC
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    $$2, OUT_ARG2(%esp)
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    REFRESH_IBASE
-    jnz     MterpOnStackReplacement
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpLogOSR)
-#endif
-    movl    $$1, %eax
-    jmp     MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG0(%esp)
-    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    xor     %eax, %eax
-    jmp     MterpDone
-
-/*
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    movl    $$1, %eax
-    jmp     MterpDone
-MterpReturn:
-    movl    OFF_FP_RESULT_REGISTER(rFP), %edx
-    movl    %eax, (%edx)
-    movl    %ecx, 4(%edx)
-    mov     $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    cmpw    $$0, rPROFILE
-    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
-
-    movl    %eax, rINST                     # stash return value
-    /* Report cached hotness counts */
-    movl    OFF_FP_METHOD(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movl    rINST, %eax                     # restore return value
-
-    /* pop up frame */
-MRestoreFrame:
-    addl    $$FRAME_SIZE, %esp
-    .cfi_adjust_cfa_offset -FRAME_SIZE
-
-    /* Restore callee save register */
-    POP     %ebx
-    POP     %esi
-    POP     %edi
-    POP     %ebp
-    ret
-    .cfi_endproc
-    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/fpcmp.S b/runtime/interpreter/mterp/x86/fpcmp.S
deleted file mode 100644
index 5f9eef9..0000000
--- a/runtime/interpreter/mterp/x86/fpcmp.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"suff":"d","nanval":"pos"}
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx<- CC
-    movzbl  2(rPC), %eax                    # eax<- BB
-    movs${suff} VREG_ADDRESS(%eax), %xmm0
-    xor     %eax, %eax
-    ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
-    jp      .L${opcode}_nan_is_${nanval}
-    je      .L${opcode}_finish
-    jb      .L${opcode}_less
-.L${opcode}_nan_is_pos:
-    incl    %eax
-    jmp     .L${opcode}_finish
-.L${opcode}_nan_is_neg:
-.L${opcode}_less:
-    decl    %eax
-.L${opcode}_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/fpcvt.S b/runtime/interpreter/mterp/x86/fpcvt.S
deleted file mode 100644
index 7808285..0000000
--- a/runtime/interpreter/mterp/x86/fpcvt.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"instr":"","load":"","store":"","wide":"0"}
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    $load   VREG_ADDRESS(rINST)             # %st0 <- vB
-    andb    $$0xf, %cl                      # ecx <- A
-    $instr
-    $store  VREG_ADDRESS(%ecx)              # vA <- %st0
-    .if $wide
-    CLEAR_WIDE_REF %ecx
-    .else
-    CLEAR_REF %ecx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
deleted file mode 100644
index a79db27..0000000
--- a/runtime/interpreter/mterp/x86/header.S
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
-   eax, edx, ecx, st(0)-st(7)
-Callee save set:
-   ebx, esi, edi, ebp
-Return regs:
-   32-bit in eax
-   64-bit in edx:eax (low-order 32 in eax)
-   fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left.  On entry to target, first
-parm is at 4(%esp).  Traditional entry code is:
-
-functEntry:
-    push    %ebp             # save old frame pointer
-    mov     %ebp,%esp        # establish new frame pointer
-    sub     FrameSize,%esp   # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
-  nick     reg   purpose
-  rPC      esi   interpreted program counter, used for fetching instructions
-  rFP      edi   interpreted frame pointer, used for accessing locals and args
-  rINSTw   bx    first 16-bit code of current instruction
-  rINSTbl  bl    opcode portion of instruction word
-  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
-  rIBASE   edx   base of instruction handler table
-  rREFS    ebp   base of object references in shadow frame.
-
-Notes:
-   o High order 16 bits of ebx must be zero on entry to handler
-   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
-    #define MACRO_LITERAL(value) $$(value)
-    #define FUNCTION_TYPE(name)
-    #define OBJECT_TYPE(name)
-    #define SIZE(start,end)
-    // Mac OS' symbols have an _ prefix.
-    #define SYMBOL(name) _ ## name
-    #define ASM_HIDDEN .private_extern
-#else
-    #define MACRO_LITERAL(value) $$value
-    #define FUNCTION_TYPE(name) .type name, @function
-    #define OBJECT_TYPE(name) .type name, @object
-    #define SIZE(start,end) .size start, .-end
-    #define SYMBOL(name) name
-    #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
-    pushl \_reg
-    .cfi_adjust_cfa_offset 4
-    .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
-    popl \_reg
-    .cfi_adjust_cfa_offset -4
-    .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address + 4 * 4 for spills
- */
-#define FRAME_SIZE     28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        (FRAME_SIZE + 16 + 16)
-#define IN_ARG2        (FRAME_SIZE + 16 + 12)
-#define IN_ARG1        (FRAME_SIZE + 16 +  8)
-#define IN_ARG0        (FRAME_SIZE + 16 +  4)
-/* Spill offsets relative to %esp */
-#define LOCAL0         (FRAME_SIZE -  4)
-#define LOCAL1         (FRAME_SIZE -  8)
-#define LOCAL2         (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3       ( 12)
-#define OUT_ARG2       (  8)
-#define OUT_ARG1       (  4)
-#define OUT_ARG0       (  0)  /* <- ExecuteMterpImpl esp + 0 */
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF    IN_ARG0(%esp)
-#define rPC      %esi
-#define CFI_DEX  6  // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP  0  // DWARF register number of the first argument register (eax).
-#define rFP      %edi
-#define rINST    %ebx
-#define rINSTw   %bx
-#define rINSTbh  %bh
-#define rINSTbl  %bl
-#define rIBASE   %edx
-#define rREFS    %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    movl    rSELF, rIBASE
-    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
-    movl    rSELF, rIBASE
-    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
-    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
-    movb    rINSTbl, rINSTbh
-    movb    MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
- */
-.macro FETCH_INST
-    movzwl  (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
-    movzx   rINSTbl,%eax
-    movzbl  rINSTbh,rINST
-    shll    MACRO_LITERAL(${handler_size_bits}), %eax
-    addl    rIBASE, %eax
-    jmp     *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
-    leal    2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
-    ADVANCE_PC \_count
-    FETCH_INST
-    GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
-    movl    (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
-    movq    (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
-    movq    \_reg, (rFP,\_vreg,4)
-    pxor    \_reg, \_reg
-    movq    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
-    movl    4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
-    movl    \_reg, 4(rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
diff --git a/runtime/interpreter/mterp/x86/instruction_end.S b/runtime/interpreter/mterp/x86/instruction_end.S
deleted file mode 100644
index 94587f8..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
-    .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_alt.S b/runtime/interpreter/mterp/x86/instruction_end_alt.S
deleted file mode 100644
index 7757bce..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmAltInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
-    .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_sister.S b/runtime/interpreter/mterp/x86/instruction_end_sister.S
deleted file mode 100644
index 8eb79ac..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmSisterEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
-    .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_start.S b/runtime/interpreter/mterp/x86/instruction_start.S
deleted file mode 100644
index 5d29a819..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
-    .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/x86/instruction_start_alt.S b/runtime/interpreter/mterp/x86/instruction_start_alt.S
deleted file mode 100644
index 8dcf5bf..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmAltInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
-    .global SYMBOL(artMterpAsmAltInstructionStart)
-    .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86/instruction_start_sister.S b/runtime/interpreter/mterp/x86/instruction_start_sister.S
deleted file mode 100644
index 796e98b..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmSisterStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
-    .global SYMBOL(artMterpAsmSisterStart)
-    .text
-    .balign 4
-SYMBOL(artMterpAsmSisterStart):
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index c23053b..06cd904 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
 /*
  * Generic invoke handler wrapper.
  */
@@ -17,9 +17,105 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
+    movl    rSELF, %eax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+    jz      MterpFallback
     RESTORE_IBASE
     FETCH_INST
     GOTO_NEXT
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    EXPORT_PC
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG1(%esp)
+    movl    rPC, OUT_ARG2(%esp)
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_ARG3(%esp)
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 4
+    movl    rSELF, %eax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+    jz      MterpFallback
+    RESTORE_IBASE
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/x86/invoke_polymorphic.S b/runtime/interpreter/mterp/x86/invoke_polymorphic.S
deleted file mode 100644
index 5690b22..0000000
--- a/runtime/interpreter/mterp/x86/invoke_polymorphic.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG0(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG1(%esp)
-    movl    rPC, OUT_ARG2(%esp)
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_ARG3(%esp)
-    call    SYMBOL($helper)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    RESTORE_IBASE
-    FETCH_INST
-    GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
new file mode 100644
index 0000000..6eaea6f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -0,0 +1,804 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+/*
+x86 ABI general notes:
+
+Caller save set:
+   eax, edx, ecx, st(0)-st(7)
+Callee save set:
+   ebx, esi, edi, ebp
+Return regs:
+   32-bit in eax
+   64-bit in edx:eax (low-order 32 in eax)
+   fp on top of fp stack st(0)
+
+Parameters passed on stack, pushed right-to-left.  On entry to target, first
+parm is at 4(%esp).  Traditional entry code is:
+
+functEntry:
+    push    %ebp             # save old frame pointer
+    mov     %ebp,%esp        # establish new frame pointer
+    sub     FrameSize,%esp   # Allocate storage for spill, locals & outs
+
+Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be esp relative.
+*/
+
+/*
+Mterp and x86 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+  nick     reg   purpose
+  rPC      esi   interpreted program counter, used for fetching instructions
+  rFP      edi   interpreted frame pointer, used for accessing locals and args
+  rINSTw   bx    first 16-bit code of current instruction
+  rINSTbl  bl    opcode portion of instruction word
+  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   edx   base of instruction handler table
+  rREFS    ebp   base of object references in shadow frame.
+
+Notes:
+   o High order 16 bits of ebx must be zero on entry to handler
+   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define LITERAL(value) $$(value)
+
+/*
+ * Handle mac compiler specific
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $$(value)
+    #define FUNCTION_TYPE(name)
+    #define OBJECT_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+    #define ASM_HIDDEN .private_extern
+#else
+    #define MACRO_LITERAL(value) $$value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define OBJECT_TYPE(name) .type name, @object
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+    #define ASM_HIDDEN .hidden
+#endif
+
+.macro PUSH _reg
+    pushl \_reg
+    .cfi_adjust_cfa_offset 4
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popl \_reg
+    .cfi_adjust_cfa_offset -4
+    .cfi_restore \_reg
+.endm
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+/* Frame size must be 16-byte aligned.
+ * Remember about 4 bytes for return address + 4 * 4 for spills
+ */
+#define FRAME_SIZE     28
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3        (FRAME_SIZE + 16 + 16)
+#define IN_ARG2        (FRAME_SIZE + 16 + 12)
+#define IN_ARG1        (FRAME_SIZE + 16 +  8)
+#define IN_ARG0        (FRAME_SIZE + 16 +  4)
+/* Spill offsets relative to %esp */
+#define LOCAL0         (FRAME_SIZE -  4)
+#define LOCAL1         (FRAME_SIZE -  8)
+#define LOCAL2         (FRAME_SIZE - 12)
+/* Out Arg offsets, relative to %esp */
+#define OUT_ARG3       ( 12)
+#define OUT_ARG2       (  8)
+#define OUT_ARG1       (  4)
+#define OUT_ARG0       (  0)  /* <- ExecuteMterpImpl esp + 0 */
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF    IN_ARG0(%esp)
+#define rPC      %esi
+#define CFI_DEX  6  // DWARF register number of the register holding dex-pc (esi).
+#define CFI_TMP  0  // DWARF register number of the first argument register (eax).
+#define rFP      %edi
+#define rINST    %ebx
+#define rINSTw   %bx
+#define rINSTbh  %bh
+#define rINSTbl  %bl
+#define rIBASE   %edx
+#define rREFS    %ebp
+#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+    movl    rSELF, rIBASE
+    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
+.endm
+
+/*
+ * Refresh handler table.
+ * IBase is held in a caller-save register, so we must restore it after each call.
+ * Also it is used as a result of some 64-bit operations (like imul) and we should
+ * restore it in such cases also.
+ *
+ * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
+ */
+.macro RESTORE_IBASE
+    movl    rSELF, rIBASE
+    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
+.endm
+
+/*
+ * If rSELF is already loaded then we can use it from known reg.
+ */
+.macro RESTORE_IBASE_FROM_SELF _reg
+    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * On entry to the handler, rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+    movb    rINSTbl, rINSTbh
+    movb    MACRO_LITERAL(\_opnum), rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    movzwl  (rPC), rINST
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+    movzx   rINSTbl,%eax
+    movzbl  rINSTbh,rINST
+    shll    MACRO_LITERAL(${handler_size_bits}), %eax
+    addl    rIBASE, %eax
+    jmp     *%eax
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+    leal    2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+    ADVANCE_PC \_count
+    FETCH_INST
+    GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+    movl    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+/* Read wide value to xmm. */
+.macro GET_WIDE_FP_VREG _reg _vreg
+    movq    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+/* Write wide value from xmm. xmm is clobbered. */
+.macro SET_WIDE_FP_VREG _reg _vreg
+    movq    \_reg, VREG_ADDRESS(\_vreg)
+    pxor    \_reg, \_reg
+    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_XMMs _xmmreg _vreg
+    movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+    movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+    movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+    movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+    .text
+    ASM_HIDDEN SYMBOL(\name)
+    .global SYMBOL(\name)
+    FUNCTION_TYPE(\name)
+SYMBOL(\name):
+.endm
+
+.macro END name
+    SIZE(\name,\name)
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ *
+ * On entry:
+ *  0  Thread* self
+ *  1  insns_
+ *  2  ShadowFrame
+ *  3  JValue* result_register
+ *
+ */
+ENTRY ExecuteMterpImpl
+    .cfi_startproc
+    .cfi_def_cfa esp, 4
+
+    /* Spill callee save regs */
+    PUSH    %ebp
+    PUSH    %edi
+    PUSH    %esi
+    PUSH    %ebx
+
+    /* Allocate frame */
+    subl    $$FRAME_SIZE, %esp
+    .cfi_adjust_cfa_offset FRAME_SIZE
+
+    /* Load ShadowFrame pointer */
+    movl    IN_ARG2(%esp), %edx
+
+    /* Remember the return register */
+    movl    IN_ARG3(%esp), %eax
+    movl    %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
+
+    /* Remember the code_item */
+    movl    IN_ARG1(%esp), %ecx
+    movl    %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
+
+    /* set up "named" registers */
+    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
+    leal    SHADOWFRAME_VREGS_OFFSET(%edx), rFP
+    leal    (rFP, %eax, 4), rREFS
+    movl    SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
+    lea     (%ecx, %eax, 2), rPC
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+    EXPORT_PC
+
+    /* Set up for backwards branches & osr profiling */
+    movl    OFF_FP_METHOD(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG2(%esp)
+    call    SYMBOL(MterpSetUpHotnessCountdown)
+
+    /* Starting ibase */
+    REFRESH_IBASE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
+    // cfi info continues, and covers the whole mterp implementation.
+    END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+    // Call C++ to do debug checks and return to the handler using tail call.
+    .extern MterpCheckBefore
+    popl    %eax                     # Return address (the instruction handler).
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rPC, OUT_ARG2(%esp)
+    pushl   %eax                     # Return address for the tail call.
+    jmp     SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+
+%def opcode_pre():
+%  add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+    #if !defined(NDEBUG)
+    call    SYMBOL(Mterp_dchecks_before_helper)
+    REFRESH_IBASE
+    #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+%def helpers():
+    ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogDivideByZeroException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogArrayIndexException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogNoSuchMethodException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogNullObjectException)
+#endif
+    jmp     MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG0(%esp)
+    call    SYMBOL(MterpLogExceptionThrownException)
+#endif
+    jmp     MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG0(%esp)
+    movl    THREAD_FLAGS_OFFSET(%eax), %eax
+    movl    %eax, OUT_ARG2(%esp)
+    call    SYMBOL(MterpLogSuspendFallback)
+#endif
+    jmp     MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    movl    rSELF, %eax
+    testl   $$-1, THREAD_EXCEPTION_OFFSET(%eax)
+    jz      MterpFallback
+    /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpHandleException)
+    testb   %al, %al
+    jz      MterpExceptionReturn
+    movl    OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
+    movl    OFF_FP_DEX_PC(rFP), %ecx
+    lea     (%eax, %ecx, 2), rPC
+    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    movl    rSELF, %eax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+    jz      MterpFallback
+    /* resume execution at catch block */
+    REFRESH_IBASE
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    rINST          <= signed offset
+ *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranch:
+    jg      .L_forward_branch               # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+#  error "JIT_CHECK_OSR must be -1."
+#endif
+    cmpw    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_osr_check
+    decw    rPROFILE
+    je      .L_add_batch                    # counted down to zero - report
+.L_resume_backward_branch:
+    movl    rSELF, %eax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    leal    (rPC, rINST, 2), rPC
+    FETCH_INST
+    jnz     .L_suspend_request_pending
+    REFRESH_IBASE
+    GOTO_NEXT
+
+.L_suspend_request_pending:
+    EXPORT_PC
+    movl    %eax, OUT_ARG0(%esp)            # rSELF in eax
+    call    SYMBOL(MterpSuspendCheck)       # (self)
+    testb   %al, %al
+    jnz     MterpFallback
+    REFRESH_IBASE                           # might have changed during suspend
+    GOTO_NEXT
+
+.L_no_count_backwards:
+    cmpw    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
+    jne     .L_resume_backward_branch
+.L_osr_check:
+    EXPORT_PC
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    jz      .L_resume_backward_branch
+    jmp     MterpOnStackReplacement
+
+.L_forward_branch:
+    cmpw    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
+    je      .L_check_osr_forward
+.L_resume_forward_branch:
+    leal    (rPC, rINST, 2), rPC
+    FETCH_INST
+    GOTO_NEXT
+
+.L_check_osr_forward:
+    EXPORT_PC
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    REFRESH_IBASE
+    jz      .L_resume_forward_branch
+    jmp     MterpOnStackReplacement
+
+.L_add_batch:
+    movl    OFF_FP_METHOD(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG2(%esp)
+    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
+    jmp     .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    EXPORT_PC
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    $$2, OUT_ARG2(%esp)
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    REFRESH_IBASE
+    jnz     MterpOnStackReplacement
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $$1, %eax
+    jmp     MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    call    SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    xor     %eax, %eax
+    jmp     MterpDone
+
+/*
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    movl    $$1, %eax
+    jmp     MterpDone
+MterpReturn:
+    movl    OFF_FP_RESULT_REGISTER(rFP), %edx
+    movl    %eax, (%edx)
+    movl    %ecx, 4(%edx)
+    mov     $$1, %eax
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    cmpw    $$0, rPROFILE
+    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
+
+    movl    %eax, rINST                     # stash return value
+    /* Report cached hotness counts */
+    movl    OFF_FP_METHOD(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG2(%esp)
+    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
+    movl    rINST, %eax                     # restore return value
+
+    /* pop up frame */
+MRestoreFrame:
+    addl    $$FRAME_SIZE, %esp
+    .cfi_adjust_cfa_offset -FRAME_SIZE
+
+    /* Restore callee save register */
+    POP     %ebx
+    POP     %esi
+    POP     %edi
+    POP     %ebp
+    ret
+    .cfi_endproc
+    END MterpHelpers
+
+%def instruction_end():
+
+    OBJECT_TYPE(artMterpAsmInstructionEnd)
+    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
+    .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
+%def instruction_start():
+
+    OBJECT_TYPE(artMterpAsmInstructionStart)
+    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
+    .global SYMBOL(artMterpAsmInstructionStart)
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+    .text
+
+%def opcode_start():
+    ENTRY Mterp_${opcode}
+%def opcode_end():
+    END Mterp_${opcode}
+%def helper_start(name):
+    ENTRY ${name}
+%def helper_end(name):
+    END ${name}
diff --git a/runtime/interpreter/mterp/x86/object.S b/runtime/interpreter/mterp/x86/object.S
new file mode 100644
index 0000000..a47fa3a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/object.S
@@ -0,0 +1,278 @@
+%def field(helper=""):
+    /*
+     * General field read / write (iget-* iput-* sget-* sput-*).
+     */
+    .extern $helper
+    REFRESH_INST ${opnum}                   # fix rINST to include opcode
+    movl    rPC, OUT_ARG0(%esp)             # arg0: Instruction* inst
+    movl    rINST, OUT_ARG1(%esp)           # arg1: uint16_t inst_data
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG2(%esp)            # arg2: ShadowFrame* sf
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG3(%esp)            # arg3: Thread* self
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jz      MterpPossibleException
+    RESTORE_IBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_check_cast():
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC
+    movzwl  2(rPC), %eax                    # eax <- BBBB
+    movl    %eax, OUT_ARG0(%esp)
+    leal    VREG_ADDRESS(rINST), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    OFF_FP_METHOD(rFP),%eax
+    movl    %eax, OUT_ARG2(%esp)
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG3(%esp)
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
+    RESTORE_IBASE
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+%  field(helper=helper)
+
+%def op_iget_boolean():
+%  op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_byte():
+%  op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_char():
+%  op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="movzwl")
+
+%def op_iget_object():
+%  op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
+    movzwl  2(rPC), %eax                    # eax <- field byte offset
+    movl    %ecx, OUT_ARG0(%esp)
+    movl    %eax, OUT_ARG1(%esp)
+    EXPORT_PC
+    call    SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
+    movl    rSELF, %ecx
+    RESTORE_IBASE_FROM_SELF %ecx
+    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+    jnz     MterpException                  # bail out
+    andb    $$0xf,rINSTbl                   # rINST <- A
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_quick(load="movl"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
+    movzwl  2(rPC), %eax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    ${load} (%ecx,%eax,1), %eax
+    andb    $$0xf,rINSTbl                   # rINST <- A
+    SET_VREG %eax, rINST                    # fp[A] <- value
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_short():
+%  op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="movswl")
+
+%def op_iget_wide():
+%  op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+    /* iget-wide-quick vA, vB, offset@CCCC */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
+    movzwl  2(rPC), %eax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    movq    (%ecx,%eax,1), %xmm0
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    SET_WIDE_FP_VREG %xmm0, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_instance_of():
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC
+    movzwl  2(rPC), %eax                    # eax <- BBBB
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, %eax                       # eax <- B
+    leal    VREG_ADDRESS(%eax), %ecx        # Get object address
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    OFF_FP_METHOD(rFP),%eax
+    movl    %eax, OUT_ARG2(%esp)
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG3(%esp)
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
+    movl    rSELF, %ecx
+    RESTORE_IBASE_FROM_SELF %ecx
+    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+    jnz     MterpException
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_object():
+%  op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    EXPORT_PC
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rPC, OUT_ARG1(%esp)
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpIputObjectQuick)
+    testb   %al, %al
+    jz      MterpException
+    RESTORE_IBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_quick(reg="rINST", store="movl"):
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINST                   # rINST <- v[A]
+    movzwl  2(rPC), %eax                    # eax <- field byte offset
+    ${store}    ${reg}, (%ecx,%eax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    movzbl    rINSTbl, %ecx                 # ecx<- BA
+    sarl      $$4, %ecx                     # ecx<- B
+    GET_VREG  %ecx, %ecx                    # vB (object we're operating on)
+    testl     %ecx, %ecx                    # is object null?
+    je        common_errNullObject
+    movzwl    2(rPC), %eax                  # eax<- field byte offset
+    leal      (%ecx,%eax,1), %ecx           # ecx<- Address of 64-bit target
+    andb      $$0xf, rINSTbl                # rINST<- A
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0<- fp[A]/fp[A+1]
+    movq      %xmm0, (%ecx)                 # obj.field<- r0/r1
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_new_instance():
+/*
+ * Create a new instance of a class.
+ */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rSELF, %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpNewInstance)
+    RESTORE_IBASE
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86/op_add_double.S b/runtime/interpreter/mterp/x86/op_add_double.S
deleted file mode 100644
index de2708f..0000000
--- a/runtime/interpreter/mterp/x86/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_add_double_2addr.S b/runtime/interpreter/mterp/x86/op_add_double_2addr.S
deleted file mode 100644
index 538c9ab..0000000
--- a/runtime/interpreter/mterp/x86/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_add_float.S b/runtime/interpreter/mterp/x86/op_add_float.S
deleted file mode 100644
index 80b1736..0000000
--- a/runtime/interpreter/mterp/x86/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_add_float_2addr.S b/runtime/interpreter/mterp/x86/op_add_float_2addr.S
deleted file mode 100644
index 6649253..0000000
--- a/runtime/interpreter/mterp/x86/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int.S b/runtime/interpreter/mterp/x86/op_add_int.S
deleted file mode 100644
index f71a56b..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"addl    (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_2addr.S b/runtime/interpreter/mterp/x86/op_add_int_2addr.S
deleted file mode 100644
index 5d43b65..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"addl    %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_lit16.S b/runtime/interpreter/mterp/x86/op_add_int_lit16.S
deleted file mode 100644
index 4f34d17..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_lit8.S b/runtime/interpreter/mterp/x86/op_add_int_lit8.S
deleted file mode 100644
index 3f14744..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_long.S b/runtime/interpreter/mterp/x86/op_add_long.S
deleted file mode 100644
index dce0c26..0000000
--- a/runtime/interpreter/mterp/x86/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"addl    (rFP,%ecx,4), rIBASE", "instr2":"adcl    4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_long_2addr.S b/runtime/interpreter/mterp/x86/op_add_long_2addr.S
deleted file mode 100644
index 7847640..0000000
--- a/runtime/interpreter/mterp/x86/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"addl    %eax, (rFP,rINST,4)","instr2":"adcl    %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_aget.S b/runtime/interpreter/mterp/x86/op_aget.S
deleted file mode 100644
index 338386f..0000000
--- a/runtime/interpreter/mterp/x86/op_aget.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    $load   $data_offset(%eax,%ecx,$shift), %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_boolean.S b/runtime/interpreter/mterp/x86/op_aget_boolean.S
deleted file mode 100644
index d910c94..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_byte.S b/runtime/interpreter/mterp/x86/op_aget_byte.S
deleted file mode 100644
index aba9ffc..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_char.S b/runtime/interpreter/mterp/x86/op_aget_char.S
deleted file mode 100644
index 748e410..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_object.S b/runtime/interpreter/mterp/x86/op_aget_object.S
deleted file mode 100644
index 35ec053..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_object.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Array object get.  vAA <- vBB[vCC].
- *
- * for: aget-object
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecs <- vCC (requested index)
-    EXPORT_PC
-    movl    %eax, OUT_ARG0(%esp)
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException
-    SET_VREG_OBJECT %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_short.S b/runtime/interpreter/mterp/x86/op_aget_short.S
deleted file mode 100644
index 6eaf5d9..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_wide.S b/runtime/interpreter/mterp/x86/op_aget_wide.S
deleted file mode 100644
index 92c612a..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Array get, 64 bits.  vAA <- vBB[vCC].
- */
-    /* aget-wide vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    movq    (%eax), %xmm0                   # xmm0 <- vBB[vCC]
-    SET_WIDE_FP_VREG %xmm0, rINST           # vAA <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_and_int.S b/runtime/interpreter/mterp/x86/op_and_int.S
deleted file mode 100644
index 6272c4e..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"andl    (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_2addr.S b/runtime/interpreter/mterp/x86/op_and_int_2addr.S
deleted file mode 100644
index 95df873..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"andl    %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_lit16.S b/runtime/interpreter/mterp/x86/op_and_int_lit16.S
deleted file mode 100644
index b062064..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_lit8.S b/runtime/interpreter/mterp/x86/op_and_int_lit8.S
deleted file mode 100644
index 99915df..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_long.S b/runtime/interpreter/mterp/x86/op_and_long.S
deleted file mode 100644
index f8514ea..0000000
--- a/runtime/interpreter/mterp/x86/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"andl    (rFP,%ecx,4), rIBASE", "instr2":"andl    4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_long_2addr.S b/runtime/interpreter/mterp/x86/op_and_long_2addr.S
deleted file mode 100644
index 37249b8..0000000
--- a/runtime/interpreter/mterp/x86/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"andl    %eax, (rFP,rINST,4)","instr2":"andl    %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_aput.S b/runtime/interpreter/mterp/x86/op_aput.S
deleted file mode 100644
index 9d8c52d..0000000
--- a/runtime/interpreter/mterp/x86/op_aput.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    $data_offset(%eax,%ecx,$shift), %eax
-    GET_VREG rINST, rINST
-    $store  $reg, (%eax)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_boolean.S b/runtime/interpreter/mterp/x86/op_aput_boolean.S
deleted file mode 100644
index e7fdd53..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_byte.S b/runtime/interpreter/mterp/x86/op_aput_byte.S
deleted file mode 100644
index 491d03c..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_char.S b/runtime/interpreter/mterp/x86/op_aput_char.S
deleted file mode 100644
index ca42cf0..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_object.S b/runtime/interpreter/mterp/x86/op_aput_object.S
deleted file mode 100644
index 980b26a..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_object.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Store an object into an array.  vBB[vCC] <- vAA.
- */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAputObject)         # (array, index)
-    RESTORE_IBASE
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_short.S b/runtime/interpreter/mterp/x86/op_aput_short.S
deleted file mode 100644
index 5e63482..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_wide.S b/runtime/interpreter/mterp/x86/op_aput_wide.S
deleted file mode 100644
index 43ef64a..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_wide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Array put, 64 bits.  vBB[vCC] <- vAA.
- *
- */
-    /* aput-wide vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- vAA
-    movq    %xmm0, (%eax)                   # vBB[vCC] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_array_length.S b/runtime/interpreter/mterp/x86/op_array_length.S
deleted file mode 100644
index 60ed80b..0000000
--- a/runtime/interpreter/mterp/x86/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Return the length of an array.
- */
-    mov     rINST, %eax                     # eax <- BA
-    sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %ecx, rINST                    # ecx <- vB (object ref)
-    testl   %ecx, %ecx                      # is null?
-    je      common_errNullObject
-    andb    $$0xf, %al                      # eax <- A
-    movl    MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
-    SET_VREG rINST, %eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_check_cast.S b/runtime/interpreter/mterp/x86/op_check_cast.S
deleted file mode 100644
index d090aa3..0000000
--- a/runtime/interpreter/mterp/x86/op_check_cast.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Check to see if a cast from one class to another is allowed.
- */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    leal    VREG_ADDRESS(rINST), %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    OFF_FP_METHOD(rFP),%eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_cmp_long.S b/runtime/interpreter/mterp/x86/op_cmp_long.S
deleted file mode 100644
index 1f729b0..0000000
--- a/runtime/interpreter/mterp/x86/op_cmp_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
-    /* cmp-long vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
-    cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
-    jl      .L${opcode}_smaller
-    jg      .L${opcode}_bigger
-    movzbl  2(rPC), %eax                    # eax <- BB, restore BB
-    GET_VREG %eax, %eax                     # eax <- v[BB]
-    sub     VREG_ADDRESS(%ecx), %eax
-    ja      .L${opcode}_bigger
-    jb      .L${opcode}_smaller
-.L${opcode}_finish:
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_bigger:
-    movl    $$1, %eax
-    jmp     .L${opcode}_finish
-
-.L${opcode}_smaller:
-    movl    $$-1, %eax
-    jmp     .L${opcode}_finish
diff --git a/runtime/interpreter/mterp/x86/op_cmpg_double.S b/runtime/interpreter/mterp/x86/op_cmpg_double.S
deleted file mode 100644
index a73ba55..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpg_float.S b/runtime/interpreter/mterp/x86/op_cmpg_float.S
deleted file mode 100644
index 648051b..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpl_double.S b/runtime/interpreter/mterp/x86/op_cmpl_double.S
deleted file mode 100644
index 058163e..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpl_float.S b/runtime/interpreter/mterp/x86/op_cmpl_float.S
deleted file mode 100644
index 302f078..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86/op_const.S b/runtime/interpreter/mterp/x86/op_const.S
deleted file mode 100644
index 544d63b..0000000
--- a/runtime/interpreter/mterp/x86/op_const.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax, rINST                    # vAA<- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_16.S b/runtime/interpreter/mterp/x86/op_const_16.S
deleted file mode 100644
index 97cd5fa..0000000
--- a/runtime/interpreter/mterp/x86/op_const_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const/16 vAA, #+BBBB */
-    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx, rINST                    # vAA <- ssssBBBB
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_4.S b/runtime/interpreter/mterp/x86/op_const_4.S
deleted file mode 100644
index a60ba96..0000000
--- a/runtime/interpreter/mterp/x86/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/4 vA, #+B */
-    movsx   rINSTbl, %eax                   # eax <-ssssssBx
-    movl    $$0xf, rINST
-    andl    %eax, rINST                     # rINST <- A
-    sarl    $$4, %eax
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_const_class.S b/runtime/interpreter/mterp/x86/op_const_class.S
deleted file mode 100644
index 71648b5..0000000
--- a/runtime/interpreter/mterp/x86/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86/op_const_high16.S b/runtime/interpreter/mterp/x86/op_const_high16.S
deleted file mode 100644
index 576967a..0000000
--- a/runtime/interpreter/mterp/x86/op_const_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-    /* const/high16 vAA, #+BBBB0000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $$16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax, rINST                    # vAA <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_method_handle.S b/runtime/interpreter/mterp/x86/op_const_method_handle.S
deleted file mode 100644
index 77948fd..0000000
--- a/runtime/interpreter/mterp/x86/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86/op_const_method_type.S b/runtime/interpreter/mterp/x86/op_const_method_type.S
deleted file mode 100644
index 03c6ce5..0000000
--- a/runtime/interpreter/mterp/x86/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86/op_const_string.S b/runtime/interpreter/mterp/x86/op_const_string.S
deleted file mode 100644
index 5553aab..0000000
--- a/runtime/interpreter/mterp/x86/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
deleted file mode 100644
index e7f952a..0000000
--- a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, OUT_ARG1(%esp)
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG3(%esp)
-    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide.S b/runtime/interpreter/mterp/x86/op_const_wide.S
deleted file mode 100644
index 3750728..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    movl    2(rPC), %eax                    # eax <- lsw
-    movzbl  rINSTbl, %ecx                   # ecx <- AA
-    movl    6(rPC), rINST                   # rINST <- msw
-    SET_VREG %eax, %ecx
-    SET_VREG_HIGH  rINST, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_16.S b/runtime/interpreter/mterp/x86/op_const_wide_16.S
deleted file mode 100644
index 1331c32..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_16.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const-wide/16 vAA, #+BBBB */
-    movswl  2(rPC), %eax                    # eax <- ssssBBBB
-    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
-    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE, rINST             # store msw
-    SET_VREG %eax, rINST                    # store lsw
-    movl    %ecx, rIBASE                    # restore rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_32.S b/runtime/interpreter/mterp/x86/op_const_wide_32.S
deleted file mode 100644
index ed7d62b..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_32.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # eax <- BBBBbbbb
-    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
-    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE, rINST             # store msw
-    SET_VREG %eax, rINST                    # store lsw
-    movl    %ecx, rIBASE                    # restore rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_high16.S b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
deleted file mode 100644
index 11b9310..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_high16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $$16, %eax                      # eax <- BBBB0000
-    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
-    xorl    %eax, %eax
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_double.S b/runtime/interpreter/mterp/x86/op_div_double.S
deleted file mode 100644
index 575716d..0000000
--- a/runtime/interpreter/mterp/x86/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_div_double_2addr.S b/runtime/interpreter/mterp/x86/op_div_double_2addr.S
deleted file mode 100644
index 8229a31..0000000
--- a/runtime/interpreter/mterp/x86/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_div_float.S b/runtime/interpreter/mterp/x86/op_div_float.S
deleted file mode 100644
index 250f1dc..0000000
--- a/runtime/interpreter/mterp/x86/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_div_float_2addr.S b/runtime/interpreter/mterp/x86/op_div_float_2addr.S
deleted file mode 100644
index c30d148..0000000
--- a/runtime/interpreter/mterp/x86/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int.S b/runtime/interpreter/mterp/x86/op_div_int.S
deleted file mode 100644
index 5fc8fa5..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv.S" {"result":"%eax","special":"$0x80000000","rem":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_2addr.S b/runtime/interpreter/mterp/x86/op_div_int_2addr.S
deleted file mode 100644
index 04cf1ba..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv2addr.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_lit16.S b/runtime/interpreter/mterp/x86/op_div_int_lit16.S
deleted file mode 100644
index dd396bb..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit16.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_lit8.S b/runtime/interpreter/mterp/x86/op_div_int_lit8.S
deleted file mode 100644
index 3cbd9d0..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit8.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_long.S b/runtime/interpreter/mterp/x86/op_div_long.S
deleted file mode 100644
index e56a035..0000000
--- a/runtime/interpreter/mterp/x86/op_div_long.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default {"routine":"art_quick_ldiv"}
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div vAA, vBB, vCC */
-    .extern $routine
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %ecx, %edx
-    orl     %ebx, %ecx
-    jz      common_errDivideByZero
-    movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx, %eax
-    GET_VREG %eax, %eax
-    call    SYMBOL($routine)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_long_2addr.S b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
deleted file mode 100644
index 159cc44..0000000
--- a/runtime/interpreter/mterp/x86/op_div_long_2addr.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default {"routine":"art_quick_ldiv"}
-/* art_quick_* methods has quick abi,
- *   so use eax, ecx, edx, ebx for args
- */
-    /* div/2addr vA, vB */
-    .extern   $routine
-    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
-    movzbl  rINSTbl, %eax
-    shrl    $$4, %eax                       # eax <- B
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
-    movl    %ebx, %ecx
-    GET_VREG %edx, %eax
-    GET_VREG_HIGH %ebx, %eax
-    movl    %edx, %eax
-    orl     %ebx, %eax
-    jz      common_errDivideByZero
-    GET_VREG %eax, %ecx
-    GET_VREG_HIGH %ecx, %ecx
-    call    SYMBOL($routine)
-    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE, rINST
-    SET_VREG %eax, rINST
-    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_double_to_float.S b/runtime/interpreter/mterp/x86/op_double_to_float.S
deleted file mode 100644
index 5135d60..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fldl","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_double_to_int.S b/runtime/interpreter/mterp/x86/op_double_to_int.S
deleted file mode 100644
index 9c4e11c..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"1","tgtlong":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_double_to_long.S b/runtime/interpreter/mterp/x86/op_double_to_long.S
deleted file mode 100644
index fe0eee2..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"1","tgtlong":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_fill_array_data.S b/runtime/interpreter/mterp/x86/op_fill_array_data.S
deleted file mode 100644
index 5855284..0000000
--- a/runtime/interpreter/mterp/x86/op_fill_array_data.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    GET_VREG %eax, rINST                    # eax <- vAA (array object)
-    movl    %eax, OUT_ARG0(%esp)
-    movl    %ecx, OUT_ARG1(%esp)
-    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
-    REFRESH_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array.S b/runtime/interpreter/mterp/x86/op_filled_new_array.S
deleted file mode 100644
index 35b2fe8..0000000
--- a/runtime/interpreter/mterp/x86/op_filled_new_array.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG2(%esp)
-    call    SYMBOL($helper)
-    REFRESH_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86/op_filled_new_array_range.S
deleted file mode 100644
index 841059e..0000000
--- a/runtime/interpreter/mterp/x86/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86/op_float_to_double.S b/runtime/interpreter/mterp/x86/op_float_to_double.S
deleted file mode 100644
index 12a3e14..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"flds","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_float_to_int.S b/runtime/interpreter/mterp/x86/op_float_to_int.S
deleted file mode 100644
index ac57388..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"0","tgtlong":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_float_to_long.S b/runtime/interpreter/mterp/x86/op_float_to_long.S
deleted file mode 100644
index be1d982..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"0","tgtlong":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_goto.S b/runtime/interpreter/mterp/x86/op_goto.S
deleted file mode 100644
index 1827d68..0000000
--- a/runtime/interpreter/mterp/x86/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto +AA */
-    movsbl  rINSTbl, rINST                  # rINST <- ssssssAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_goto_16.S b/runtime/interpreter/mterp/x86/op_goto_16.S
deleted file mode 100644
index ea5ea90..0000000
--- a/runtime/interpreter/mterp/x86/op_goto_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto/16 +AAAA */
-    movswl  2(rPC), rINST                   # rINST <- ssssAAAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_goto_32.S b/runtime/interpreter/mterp/x86/op_goto_32.S
deleted file mode 100644
index 4becaf3..0000000
--- a/runtime/interpreter/mterp/x86/op_goto_32.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".  Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
-    /* goto/32 +AAAAAAAA */
-    movl    2(rPC), rINST                   # rINST <- AAAAAAAA
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_if_eq.S b/runtime/interpreter/mterp/x86/op_if_eq.S
deleted file mode 100644
index 5413d98..0000000
--- a/runtime/interpreter/mterp/x86/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86/op_if_eqz.S b/runtime/interpreter/mterp/x86/op_if_eqz.S
deleted file mode 100644
index 53dc99e..0000000
--- a/runtime/interpreter/mterp/x86/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ge.S b/runtime/interpreter/mterp/x86/op_if_ge.S
deleted file mode 100644
index c2ba3c6..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gez.S b/runtime/interpreter/mterp/x86/op_if_gez.S
deleted file mode 100644
index cd2c772..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gt.S b/runtime/interpreter/mterp/x86/op_if_gt.S
deleted file mode 100644
index 9fe84bb..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gtz.S b/runtime/interpreter/mterp/x86/op_if_gtz.S
deleted file mode 100644
index b454ffd..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86/op_if_le.S b/runtime/interpreter/mterp/x86/op_if_le.S
deleted file mode 100644
index 93571a7..0000000
--- a/runtime/interpreter/mterp/x86/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86/op_if_lez.S b/runtime/interpreter/mterp/x86/op_if_lez.S
deleted file mode 100644
index 779c77f..0000000
--- a/runtime/interpreter/mterp/x86/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86/op_if_lt.S b/runtime/interpreter/mterp/x86/op_if_lt.S
deleted file mode 100644
index 1fb1521..0000000
--- a/runtime/interpreter/mterp/x86/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ltz.S b/runtime/interpreter/mterp/x86/op_if_ltz.S
deleted file mode 100644
index 155c356..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ne.S b/runtime/interpreter/mterp/x86/op_if_ne.S
deleted file mode 100644
index 7e1b065..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86/op_if_nez.S b/runtime/interpreter/mterp/x86/op_if_nez.S
deleted file mode 100644
index 8951f5b..0000000
--- a/runtime/interpreter/mterp/x86/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
deleted file mode 100644
index d85d54c..0000000
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "x86/field.S" { }
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean.S b/runtime/interpreter/mterp/x86/op_iget_boolean.S
deleted file mode 100644
index ddccc41..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S
deleted file mode 100644
index 02b0c16..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte.S b/runtime/interpreter/mterp/x86/op_iget_byte.S
deleted file mode 100644
index cd46d3d..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86/op_iget_byte_quick.S
deleted file mode 100644
index 02b0c16..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char.S b/runtime/interpreter/mterp/x86/op_iget_char.S
deleted file mode 100644
index 9969734..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char_quick.S b/runtime/interpreter/mterp/x86/op_iget_char_quick.S
deleted file mode 100644
index a5d9712..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object.S b/runtime/interpreter/mterp/x86/op_iget_object.S
deleted file mode 100644
index 3d421fc..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object_quick.S b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
deleted file mode 100644
index b1551a0..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_object_quick.S
+++ /dev/null
@@ -1,17 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    %eax, OUT_ARG1(%esp)
-    EXPORT_PC
-    call    SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException                  # bail out
-    andb    $$0xf,rINSTbl                   # rINST <- A
-    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_quick.S b/runtime/interpreter/mterp/x86/op_iget_quick.S
deleted file mode 100644
index 1b7440f..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "load":"movl"}
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    ${load} (%ecx,%eax,1), %eax
-    andb    $$0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax, rINST                    # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_short.S b/runtime/interpreter/mterp/x86/op_iget_short.S
deleted file mode 100644
index c7477f5..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_short_quick.S b/runtime/interpreter/mterp/x86/op_iget_short_quick.S
deleted file mode 100644
index 2c3aeb6..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
deleted file mode 100644
index 741a64e..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
deleted file mode 100644
index 7ce74cc..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    movq    (%ecx,%eax,1), %xmm0
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    SET_WIDE_FP_VREG %xmm0, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_instance_of.S b/runtime/interpreter/mterp/x86/op_instance_of.S
deleted file mode 100644
index e6fe5b2..0000000
--- a/runtime/interpreter/mterp/x86/op_instance_of.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    movzwl  2(rPC), %eax                    # eax <- BBBB
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $$4, %eax                       # eax <- B
-    leal    VREG_ADDRESS(%eax), %ecx        # Get object address
-    movl    %ecx, OUT_ARG1(%esp)
-    movl    OFF_FP_METHOD(rFP),%eax
-    movl    %eax, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
-    movl    rSELF, %ecx
-    RESTORE_IBASE_FROM_SELF %ecx
-    cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
-    jnz     MterpException
-    andb    $$0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_int_to_byte.S b/runtime/interpreter/mterp/x86/op_int_to_byte.S
deleted file mode 100644
index b4e8d22c..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movsbl  %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_char.S b/runtime/interpreter/mterp/x86/op_int_to_char.S
deleted file mode 100644
index 4608971..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movzwl  %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_double.S b/runtime/interpreter/mterp/x86/op_int_to_double.S
deleted file mode 100644
index 3e9921e..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildl","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_float.S b/runtime/interpreter/mterp/x86/op_int_to_float.S
deleted file mode 100644
index 849540d..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildl","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_long.S b/runtime/interpreter/mterp/x86/op_int_to_long.S
deleted file mode 100644
index 6f9ea26..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_long.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* int to long vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- +A
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
-    cltd                                    # rINST:eax<- sssssssBBBBBBBB
-    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
-    SET_VREG %eax, rINST                    # v[A+0] <- %eax
-    movl    %ecx, rIBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
diff --git a/runtime/interpreter/mterp/x86/op_int_to_short.S b/runtime/interpreter/mterp/x86/op_int_to_short.S
deleted file mode 100644
index 90d0ae6..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom.S b/runtime/interpreter/mterp/x86/op_invoke_custom.S
deleted file mode 100644
index eddd5b3..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
deleted file mode 100644
index 1a4e884..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_direct.S b/runtime/interpreter/mterp/x86/op_invoke_direct.S
deleted file mode 100644
index 76fb9a6..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86/op_invoke_direct_range.S
deleted file mode 100644
index a6ab604..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_interface.S b/runtime/interpreter/mterp/x86/op_invoke_interface.S
deleted file mode 100644
index 91c24f5..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeInterface" }
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86/op_invoke_interface_range.S
deleted file mode 100644
index e478beb..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
deleted file mode 100644
index 3907689..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
deleted file mode 100644
index 59a8230..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_static.S b/runtime/interpreter/mterp/x86/op_invoke_static.S
deleted file mode 100644
index b4c1236..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/x86/op_invoke_static_range.S b/runtime/interpreter/mterp/x86/op_invoke_static_range.S
deleted file mode 100644
index 3dc8a26..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_super.S b/runtime/interpreter/mterp/x86/op_invoke_super.S
deleted file mode 100644
index be20edd..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeSuper" }
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_super_range.S b/runtime/interpreter/mterp/x86/op_invoke_super_range.S
deleted file mode 100644
index f36bf72..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual.S b/runtime/interpreter/mterp/x86/op_invoke_virtual.S
deleted file mode 100644
index 7e9c456..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtual" }
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S
deleted file mode 100644
index 2dc9ab6..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S
deleted file mode 100644
index d1d20d2..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 21bfc55..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86/op_iput.S b/runtime/interpreter/mterp/x86/op_iput.S
deleted file mode 100644
index 3628ffd..0000000
--- a/runtime/interpreter/mterp/x86/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "x86/field.S" { }
diff --git a/runtime/interpreter/mterp/x86/op_iput_boolean.S b/runtime/interpreter/mterp/x86/op_iput_boolean.S
deleted file mode 100644
index fdd5303..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S
deleted file mode 100644
index 93865de..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_byte.S b/runtime/interpreter/mterp/x86/op_iput_byte.S
deleted file mode 100644
index b81850c..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86/op_iput_byte_quick.S
deleted file mode 100644
index 93865de..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_char.S b/runtime/interpreter/mterp/x86/op_iput_char.S
deleted file mode 100644
index dde3853..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_char_quick.S b/runtime/interpreter/mterp/x86/op_iput_char_quick.S
deleted file mode 100644
index 4ec8029..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_object.S b/runtime/interpreter/mterp/x86/op_iput_object.S
deleted file mode 100644
index a124b7e..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_object_quick.S b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
deleted file mode 100644
index cb77929..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_object_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpIputObjectQuick)
-    testb   %al, %al
-    jz      MterpException
-    RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_quick.S b/runtime/interpreter/mterp/x86/op_iput_quick.S
deleted file mode 100644
index b67cee0..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "reg":"rINST", "store":"movl" }
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINST                   # rINST <- v[A]
-    movzwl  2(rPC), %eax                    # eax <- field byte offset
-    ${store}    ${reg}, (%ecx,%eax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_short.S b/runtime/interpreter/mterp/x86/op_iput_short.S
deleted file mode 100644
index 130e875..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_short_quick.S b/runtime/interpreter/mterp/x86/op_iput_short_quick.S
deleted file mode 100644
index 4ec8029..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide.S b/runtime/interpreter/mterp/x86/op_iput_wide.S
deleted file mode 100644
index 2820ede..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
deleted file mode 100644
index 17de6f8..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    movzbl    rINSTbl, %ecx                 # ecx<- BA
-    sarl      $$4, %ecx                     # ecx<- B
-    GET_VREG  %ecx, %ecx                    # vB (object we're operating on)
-    testl     %ecx, %ecx                    # is object null?
-    je        common_errNullObject
-    movzwl    2(rPC), %eax                  # eax<- field byte offset
-    leal      (%ecx,%eax,1), %ecx           # ecx<- Address of 64-bit target
-    andb      $$0xf, rINSTbl                # rINST<- A
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0<- fp[A]/fp[A+1]
-    movq      %xmm0, (%ecx)                 # obj.field<- r0/r1
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_long_to_double.S b/runtime/interpreter/mterp/x86/op_long_to_double.S
deleted file mode 100644
index 2c7f905..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildll","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_long_to_float.S b/runtime/interpreter/mterp/x86/op_long_to_float.S
deleted file mode 100644
index e500e39..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildll","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_long_to_int.S b/runtime/interpreter/mterp/x86/op_long_to_int.S
deleted file mode 100644
index 1c39b96..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "x86/op_move.S"
diff --git a/runtime/interpreter/mterp/x86/op_monitor_enter.S b/runtime/interpreter/mterp/x86/op_monitor_enter.S
deleted file mode 100644
index b35c684..0000000
--- a/runtime/interpreter/mterp/x86/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Synchronize on an object.
- */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    GET_VREG %ecx, rINST
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG1(%esp)
-    call    SYMBOL(artLockObjectFromCode)   # (object, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_monitor_exit.S b/runtime/interpreter/mterp/x86/op_monitor_exit.S
deleted file mode 100644
index 2d17d5e..0000000
--- a/runtime/interpreter/mterp/x86/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction.  See the Dalvik
- * instruction spec.
- */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    GET_VREG %ecx, rINST
-    movl    %ecx, OUT_ARG0(%esp)
-    movl    rSELF, %eax
-    movl    %eax, OUT_ARG1(%esp)
-    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
-    RESTORE_IBASE
-    testb   %al, %al
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move.S b/runtime/interpreter/mterp/x86/op_move.S
deleted file mode 100644
index ea173b9..0000000
--- a/runtime/interpreter/mterp/x86/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $$0xf, %al                      # eax <- A
-    shrl    $$4, rINST                      # rINST <- B
-    GET_VREG rINST, rINST
-    .if $is_object
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_16.S b/runtime/interpreter/mterp/x86/op_move_16.S
deleted file mode 100644
index 454deb5..0000000
--- a/runtime/interpreter/mterp/x86/op_move_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwl  4(rPC), %ecx                    # ecx <- BBBB
-    movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST, %ecx
-    .if $is_object
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_exception.S b/runtime/interpreter/mterp/x86/op_move_exception.S
deleted file mode 100644
index d8dc74f..0000000
--- a/runtime/interpreter/mterp/x86/op_move_exception.S
+++ /dev/null
@@ -1,6 +0,0 @@
-    /* move-exception vAA */
-    movl    rSELF, %ecx
-    movl    THREAD_EXCEPTION_OFFSET(%ecx), %eax
-    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- exception object
-    movl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_from16.S b/runtime/interpreter/mterp/x86/op_move_from16.S
deleted file mode 100644
index e869855..0000000
--- a/runtime/interpreter/mterp/x86/op_move_from16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzx   rINSTbl, %eax                   # eax <- AA
-    movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
-    .if $is_object
-    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
-    .else
-    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_move_object.S b/runtime/interpreter/mterp/x86/op_move_object.S
deleted file mode 100644
index a6a7c90..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_object_16.S b/runtime/interpreter/mterp/x86/op_move_object_16.S
deleted file mode 100644
index e0c8527..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_object_from16.S b/runtime/interpreter/mterp/x86/op_move_object_from16.S
deleted file mode 100644
index e623820..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_result.S b/runtime/interpreter/mterp/x86/op_move_result.S
deleted file mode 100644
index f6f2129..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
-    movl    (%eax), %eax                    # r0 <- result.i.
-    .if $is_object
-    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_result_object.S b/runtime/interpreter/mterp/x86/op_move_result_object.S
deleted file mode 100644
index cbf5e1d..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_result_wide.S b/runtime/interpreter/mterp/x86/op_move_result_wide.S
deleted file mode 100644
index 7818cce..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* move-result-wide vAA */
-    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
-    movl    4(%eax), %ecx                   # Get high
-    movl    (%eax), %eax                    # Get low
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[AA+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide.S b/runtime/interpreter/mterp/x86/op_move_wide.S
deleted file mode 100644
index 79ce7b7..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %ecx            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_16.S b/runtime/interpreter/mterp/x86/op_move_wide_16.S
deleted file mode 100644
index a6b8596..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  4(rPC), %ecx                    # ecx<- BBBB
-    movzwl  2(rPC), %eax                    # eax<- AAAA
-    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_from16.S b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
deleted file mode 100644
index ec344de..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide_from16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  2(rPC), %ecx                    # ecx <- BBBB
-    movzbl  rINSTbl, %eax                   # eax <- AAAA
-    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_double.S b/runtime/interpreter/mterp/x86/op_mul_double.S
deleted file mode 100644
index 7cef4c0..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86/op_mul_double_2addr.S
deleted file mode 100644
index bb722b6..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_float.S b/runtime/interpreter/mterp/x86/op_mul_float.S
deleted file mode 100644
index 1156230..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86/op_mul_float_2addr.S
deleted file mode 100644
index e9316df..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_int.S b/runtime/interpreter/mterp/x86/op_mul_int.S
deleted file mode 100644
index 77f4659..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /*
-     * 32-bit binary multiplication.
-     */
-    /* mul vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax, %eax                     # eax <- vBB
-    mov     rIBASE, LOCAL0(%esp)
-    imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
-    mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
deleted file mode 100644
index da699ae..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /* mul vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $$0xf, %cl                      # ecx <- A
-    movl    rIBASE, rINST
-    imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
-    movl    rINST, rIBASE
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
deleted file mode 100644
index 056f491..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* mul/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movzbl  rINSTbl, %eax                   # eax <- 000000BA
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %eax                     # eax <- vB
-    movl    rIBASE, %ecx
-    movswl  2(rPC), rIBASE                  # rIBASE <- ssssCCCC
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    imull   rIBASE, %eax                    # trashes rIBASE/edx
-    movl    %ecx, rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
deleted file mode 100644
index 59b3844..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* mul/lit8 vAA, vBB, #+CC */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movl    rIBASE, %ecx
-    GET_VREG  %eax, %eax                    # eax <- rBB
-    movsbl  3(rPC), rIBASE                  # rIBASE <- ssssssCC
-    imull   rIBASE, %eax                    # trashes rIBASE/edx
-    movl    %ecx, rIBASE
-    SET_VREG %eax, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_long.S b/runtime/interpreter/mterp/x86/op_mul_long.S
deleted file mode 100644
index f35ca13..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_long.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Signed 64-bit integer multiply.
- *
- * We could definately use more free registers for
- * this code.   We spill rINSTw (ebx),
- * giving us eax, ebc, ecx and edx as computational
- * temps.  On top of that, we'll spill edi (rFP)
- * for use as the vB pointer and esi (rPC) for use
- * as the vC pointer.  Yuck.
- *
- */
-    /* mul-long vAA, vBB, vCC */
-    movzbl  2(rPC), %eax                    # eax <- B
-    movzbl  3(rPC), %ecx                    # ecx <- C
-    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
-    mov     rFP, LOCAL1(%esp)               # save FP
-    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
-    leal    (rFP,%eax,4), %esi              # esi <- &v[B]
-    leal    (rFP,%ecx,4), rFP               # rFP <- &v[C]
-    movl    4(%esi), %ecx                   # ecx <- Bmsw
-    imull   (rFP), %ecx                     # ecx <- (Bmsw*Clsw)
-    movl    4(rFP), %eax                    # eax <- Cmsw
-    imull   (%esi), %eax                    # eax <- (Cmsw*Blsw)
-    addl    %eax, %ecx                      # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
-    movl    (rFP), %eax                     # eax <- Clsw
-    mull    (%esi)                          # eax <- (Clsw*Alsw)
-    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
-    mov     LOCAL1(%esp), rFP               # restore FP
-    leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
-    SET_VREG_HIGH rIBASE, rINST             # v[B+1] <- rIBASE
-    mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    SET_VREG %eax, rINST                    # v[B] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86/op_mul_long_2addr.S
deleted file mode 100644
index 565a57c..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_long_2addr.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Signed 64-bit integer multiply, 2-addr version
- *
- * We could definately use more free registers for
- * this code.  We must spill %edx (rIBASE) because it
- * is used by imul.  We'll also spill rINST (ebx),
- * giving us eax, ebc, ecx and rIBASE as computational
- * temps.  On top of that, we'll spill %esi (edi)
- * for use as the vA pointer and rFP (esi) for use
- * as the vB pointer.  Yuck.
- */
-    /* mul-long/2addr vA, vB */
-    movzbl  rINSTbl, %eax                   # eax <- BA
-    andb    $$0xf, %al                      # eax <- A
-    CLEAR_WIDE_REF %eax                     # clear refs in advance
-    sarl    $$4, rINST                      # rINST <- B
-    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
-    mov     rFP, LOCAL1(%esp)               # save FP
-    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
-    leal    (rFP,%eax,4), %esi              # esi <- &v[A]
-    leal    (rFP,rINST,4), rFP              # rFP <- &v[B]
-    movl    4(%esi), %ecx                   # ecx <- Amsw
-    imull   (rFP), %ecx                     # ecx <- (Amsw*Blsw)
-    movl    4(rFP), %eax                    # eax <- Bmsw
-    imull   (%esi), %eax                    # eax <- (Bmsw*Alsw)
-    addl    %eax, %ecx                      # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
-    movl    (rFP), %eax                     # eax <- Blsw
-    mull    (%esi)                          # eax <- (Blsw*Alsw)
-    leal    (%ecx,rIBASE), rIBASE           # full result now in %edx:%eax
-    movl    rIBASE, 4(%esi)                 # v[A+1] <- rIBASE
-    movl    %eax, (%esi)                    # v[A] <- %eax
-    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
-    mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    mov     LOCAL1(%esp), rFP               # restore FP
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_neg_double.S b/runtime/interpreter/mterp/x86/op_neg_double.S
deleted file mode 100644
index fac4322..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"instr":"fchs","load":"fldl","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_float.S b/runtime/interpreter/mterp/x86/op_neg_float.S
deleted file mode 100644
index 30f071b..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"instr":"fchs","load":"flds","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_int.S b/runtime/interpreter/mterp/x86/op_neg_int.S
deleted file mode 100644
index 67d4d18..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"negl    %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_long.S b/runtime/interpreter/mterp/x86/op_neg_long.S
deleted file mode 100644
index 30da247..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, %ecx                     # eax <- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
-    negl    %eax
-    adcl    $$0, %ecx
-    negl    %ecx
-    SET_VREG %eax, rINST                    # v[A+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
diff --git a/runtime/interpreter/mterp/x86/op_new_array.S b/runtime/interpreter/mterp/x86/op_new_array.S
deleted file mode 100644
index 16226e9..0000000
--- a/runtime/interpreter/mterp/x86/op_new_array.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rPC, OUT_ARG1(%esp)
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_ARG2(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG3(%esp)
-    call    SYMBOL(MterpNewArray)
-    RESTORE_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_new_instance.S b/runtime/interpreter/mterp/x86/op_new_instance.S
deleted file mode 100644
index f976acc..0000000
--- a/runtime/interpreter/mterp/x86/op_new_instance.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Create a new instance of a class.
- */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    leal    OFF_FP_SHADOWFRAME(rFP), %eax
-    movl    %eax, OUT_ARG0(%esp)
-    movl    rSELF, %ecx
-    movl    %ecx, OUT_ARG1(%esp)
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpNewInstance)
-    RESTORE_IBASE
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_nop.S b/runtime/interpreter/mterp/x86/op_nop.S
deleted file mode 100644
index 4cb68e3..0000000
--- a/runtime/interpreter/mterp/x86/op_nop.S
+++ /dev/null
@@ -1 +0,0 @@
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_not_int.S b/runtime/interpreter/mterp/x86/op_not_int.S
deleted file mode 100644
index 335ab09..0000000
--- a/runtime/interpreter/mterp/x86/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"notl %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_not_long.S b/runtime/interpreter/mterp/x86/op_not_long.S
deleted file mode 100644
index 8f706e1..0000000
--- a/runtime/interpreter/mterp/x86/op_not_long.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    /* unop vA, vB */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, %ecx                     # eax <- v[B+0]
-    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
-    notl    %eax
-    notl    %ecx
-    SET_VREG %eax, rINST                    # v[A+0] <- eax
-    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_or_int.S b/runtime/interpreter/mterp/x86/op_or_int.S
deleted file mode 100644
index ebe2ec2..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"orl     (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_2addr.S b/runtime/interpreter/mterp/x86/op_or_int_2addr.S
deleted file mode 100644
index 36c17db..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"orl     %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_lit16.S b/runtime/interpreter/mterp/x86/op_or_int_lit16.S
deleted file mode 100644
index 0a88ff59..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_lit8.S b/runtime/interpreter/mterp/x86/op_or_int_lit8.S
deleted file mode 100644
index 0670b67..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_long.S b/runtime/interpreter/mterp/x86/op_or_long.S
deleted file mode 100644
index 09ca539..0000000
--- a/runtime/interpreter/mterp/x86/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"orl     (rFP,%ecx,4), rIBASE", "instr2":"orl     4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_long_2addr.S b/runtime/interpreter/mterp/x86/op_or_long_2addr.S
deleted file mode 100644
index 2062e81..0000000
--- a/runtime/interpreter/mterp/x86/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"orl     %eax, (rFP,rINST,4)","instr2":"orl     %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_packed_switch.S b/runtime/interpreter/mterp/x86/op_packed_switch.S
deleted file mode 100644
index fcb7509..0000000
--- a/runtime/interpreter/mterp/x86/op_packed_switch.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax, rINST                    # eax <- vAA
-    leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
-    movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    SYMBOL($func)
-    REFRESH_IBASE
-    testl   %eax, %eax
-    movl    %eax, rINST
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_rem_double.S b/runtime/interpreter/mterp/x86/op_rem_double.S
deleted file mode 100644
index 4b52a06..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_double.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* rem_double vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx <- BB
-    movzbl  2(rPC), %eax                    # eax <- CC
-    fldl    VREG_ADDRESS(%ecx)              # %st1 <- fp[vBB]
-    fldl    VREG_ADDRESS(%eax)              # %st0 <- fp[vCC]
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(rINST)             # fp[vAA] <- %st
-    CLEAR_WIDE_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86/op_rem_double_2addr.S
deleted file mode 100644
index 5a0e669..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_double_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* rem_double/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    fldl    VREG_ADDRESS(rINST)             # vB to fp stack
-    andb    $$0xf, %cl                      # ecx <- A
-    fldl    VREG_ADDRESS(%ecx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(%ecx)              # %st to vA
-    CLEAR_WIDE_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_rem_float.S b/runtime/interpreter/mterp/x86/op_rem_float.S
deleted file mode 100644
index 05e0bf1..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_float.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* rem_float vAA, vBB, vCC */
-    movzbl  3(rPC), %ecx                    # ecx <- BB
-    movzbl  2(rPC), %eax                    # eax <- CC
-    flds    VREG_ADDRESS(%ecx)              # vBB to fp stack
-    flds    VREG_ADDRESS(%eax)              # vCC to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(rINST)             # %st to vAA
-    CLEAR_REF rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86/op_rem_float_2addr.S
deleted file mode 100644
index 29f84e6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_float_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* rem_float/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    flds    VREG_ADDRESS(rINST)             # vB to fp stack
-    andb    $$0xf, %cl                      # ecx <- A
-    flds    VREG_ADDRESS(%ecx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(%ecx)              # %st to vA
-    CLEAR_REF %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_rem_int.S b/runtime/interpreter/mterp/x86/op_rem_int.S
deleted file mode 100644
index d25b93c..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv.S" {"result":"rIBASE","special":"$0","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86/op_rem_int_2addr.S
deleted file mode 100644
index c788e0e..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv2addr.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86/op_rem_int_lit16.S
deleted file mode 100644
index 3df9d39..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit16.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86/op_rem_int_lit8.S
deleted file mode 100644
index 56e19c6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit8.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_long.S b/runtime/interpreter/mterp/x86/op_rem_long.S
deleted file mode 100644
index 0ffe1f6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_div_long.S" {"routine":"art_quick_lmod"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86/op_rem_long_2addr.S
deleted file mode 100644
index 4b97735..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_div_long_2addr.S" {"routine":"art_quick_lmod"}
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
deleted file mode 100644
index a8ebbed..0000000
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINST                    # eax <- vAA
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_object.S b/runtime/interpreter/mterp/x86/op_return_object.S
deleted file mode 100644
index 12c84b3..0000000
--- a/runtime/interpreter/mterp/x86/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_return.S"
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
deleted file mode 100644
index d9eddf3..0000000
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorl    %eax, %eax
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
deleted file mode 100644
index 2fbda6b..0000000
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorl    %eax, %eax
-    xorl    %ecx, %ecx
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
deleted file mode 100644
index 5fff626..0000000
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Return a 64-bit value.
- */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
-    jz      1f
-    movl    %eax, OUT_ARG0(%esp)
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    GET_VREG_HIGH %ecx, rINST               # ecx <- v[AA+1]
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_rsub_int.S b/runtime/interpreter/mterp/x86/op_rsub_int.S
deleted file mode 100644
index d6449c6..0000000
--- a/runtime/interpreter/mterp/x86/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "x86/binopLit16.S" {"instr":"subl    %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S
deleted file mode 100644
index 15d0e35..0000000
--- a/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"subl    %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
deleted file mode 100644
index ada4e0e..0000000
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "x86/field.S" { }
diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S
deleted file mode 100644
index 3936eea..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S
deleted file mode 100644
index 967586d..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S
deleted file mode 100644
index b706f18..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S
deleted file mode 100644
index eac8836..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S
deleted file mode 100644
index ee058a6..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
deleted file mode 100644
index 5923274..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_int.S b/runtime/interpreter/mterp/x86/op_shl_int.S
deleted file mode 100644
index 6a41d1c..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86/op_shl_int_2addr.S
deleted file mode 100644
index 72abb8e..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86/op_shl_int_lit8.S
deleted file mode 100644
index b8d6069..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_long.S b/runtime/interpreter/mterp/x86/op_shl_long.S
deleted file mode 100644
index aa58a93..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shl-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rINST */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # ecx <- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shldl   %eax,rIBASE
-    sall    %cl, %eax
-    testb   $$32, %cl
-    je      2f
-    movl    %eax, rIBASE
-    xorl    %eax, %eax
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
deleted file mode 100644
index 6bbf49c..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $$4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shldl   %eax, rIBASE
-    sall    %cl, %eax
-    testb   $$32, %cl
-    je      2f
-    movl    %eax, rIBASE
-    xorl    %eax, %eax
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_shr_int.S b/runtime/interpreter/mterp/x86/op_shr_int.S
deleted file mode 100644
index 687b2c3..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86/op_shr_int_2addr.S
deleted file mode 100644
index 533b0e9..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86/op_shr_int_lit8.S
deleted file mode 100644
index ebd1bea..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_long.S b/runtime/interpreter/mterp/x86/op_shr_long.S
deleted file mode 100644
index 68aa0ee..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shrdl   rIBASE, %eax
-    sarl    %cl, rIBASE
-    testb   $$32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    sarl    $$31, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
deleted file mode 100644
index 148bd1b..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $$4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shrdl   rIBASE, %eax
-    sarl    %cl, rIBASE
-    testb   $$32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    sarl    $$31, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_sparse_switch.S b/runtime/interpreter/mterp/x86/op_sparse_switch.S
deleted file mode 100644
index fdaec47..0000000
--- a/runtime/interpreter/mterp/x86/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
deleted file mode 100644
index 2ad68e7..0000000
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "x86/field.S" { }
diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S
deleted file mode 100644
index c6aa7c4..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S
deleted file mode 100644
index fd504a8..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S
deleted file mode 100644
index b4d0997..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_object.S b/runtime/interpreter/mterp/x86/op_sput_object.S
deleted file mode 100644
index 4452dba..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S
deleted file mode 100644
index eba01bd..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
deleted file mode 100644
index d79b068..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_double.S b/runtime/interpreter/mterp/x86/op_sub_double.S
deleted file mode 100644
index e83afeb..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86/op_sub_double_2addr.S
deleted file mode 100644
index af9a2ab..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_float.S b/runtime/interpreter/mterp/x86/op_sub_float.S
deleted file mode 100644
index 423d834..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86/op_sub_float_2addr.S
deleted file mode 100644
index 18de000..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_int.S b/runtime/interpreter/mterp/x86/op_sub_int.S
deleted file mode 100644
index 7fe03fb..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"subl    (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86/op_sub_int_2addr.S
deleted file mode 100644
index cc9bf60..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"subl    %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_long.S b/runtime/interpreter/mterp/x86/op_sub_long.S
deleted file mode 100644
index 014591e..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"subl    (rFP,%ecx,4), rIBASE", "instr2":"sbbl    4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86/op_sub_long_2addr.S
deleted file mode 100644
index 7498029..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"subl    %eax, (rFP,rINST,4)","instr2":"sbbl    %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_throw.S b/runtime/interpreter/mterp/x86/op_throw.S
deleted file mode 100644
index a6e6b1e..0000000
--- a/runtime/interpreter/mterp/x86/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Throw an exception object in the current thread.
- */
-    /* throw vAA */
-    EXPORT_PC
-    GET_VREG %eax, rINST                    # eax<- vAA (exception object)
-    testl   %eax, %eax
-    jz      common_errNullObject
-    movl    rSELF,%ecx
-    movl    %eax, THREAD_EXCEPTION_OFFSET(%ecx)
-    jmp     MterpException
diff --git a/runtime/interpreter/mterp/x86/op_unused_3e.S b/runtime/interpreter/mterp/x86/op_unused_3e.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_3f.S b/runtime/interpreter/mterp/x86/op_unused_3f.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_40.S b/runtime/interpreter/mterp/x86/op_unused_40.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_41.S b/runtime/interpreter/mterp/x86/op_unused_41.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_42.S b/runtime/interpreter/mterp/x86/op_unused_42.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_43.S b/runtime/interpreter/mterp/x86/op_unused_43.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_79.S b/runtime/interpreter/mterp/x86/op_unused_79.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_7a.S b/runtime/interpreter/mterp/x86/op_unused_7a.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f3.S b/runtime/interpreter/mterp/x86/op_unused_f3.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f4.S b/runtime/interpreter/mterp/x86/op_unused_f4.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f5.S b/runtime/interpreter/mterp/x86/op_unused_f5.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f6.S b/runtime/interpreter/mterp/x86/op_unused_f6.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f7.S b/runtime/interpreter/mterp/x86/op_unused_f7.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f8.S b/runtime/interpreter/mterp/x86/op_unused_f8.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f9.S b/runtime/interpreter/mterp/x86/op_unused_f9.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_fc.S b/runtime/interpreter/mterp/x86/op_unused_fc.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_fd.S b/runtime/interpreter/mterp/x86/op_unused_fd.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int.S b/runtime/interpreter/mterp/x86/op_ushr_int.S
deleted file mode 100644
index dfe25ff..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S
deleted file mode 100644
index c14bc98..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S
deleted file mode 100644
index e129f6b..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long.S b/runtime/interpreter/mterp/x86/op_ushr_long.S
deleted file mode 100644
index 9527c9c..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift.  This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.  x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
-    /* shr-long vAA, vBB, vCC */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  2(rPC), %eax                    # eax <- BB
-    movzbl  3(rPC), %ecx                    # ecx <- CC
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vCC
-    GET_VREG %eax, %eax                     # eax <- v[BB+0]
-    shrdl   rIBASE, %eax
-    shrl    %cl, rIBASE
-    testb   $$32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    xorl    rIBASE, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[BB+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
deleted file mode 100644
index 72fcc36..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
-    /* shl-long/2addr vA, vB */
-    /* ecx gets shift count */
-    /* Need to spill rIBASE */
-    /* rINSTw gets AA */
-    movzbl  rINSTbl, %ecx                   # ecx <- BA
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- v[AA+0]
-    sarl    $$4, %ecx                       # ecx <- B
-    movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
-    GET_VREG %ecx, %ecx                     # ecx <- vBB
-    shrdl   rIBASE, %eax
-    shrl    %cl, rIBASE
-    testb   $$32, %cl
-    je      2f
-    movl    rIBASE, %eax
-    xorl    rIBASE, rIBASE
-2:
-    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
-    movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax, rINST                    # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_xor_int.S b/runtime/interpreter/mterp/x86/op_xor_int.S
deleted file mode 100644
index 35aca6a..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"xorl    (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86/op_xor_int_2addr.S
deleted file mode 100644
index d7b70e2..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"xorl    %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86/op_xor_int_lit16.S
deleted file mode 100644
index 115f0a0..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86/op_xor_int_lit8.S
deleted file mode 100644
index 243971c..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_long.S b/runtime/interpreter/mterp/x86/op_xor_long.S
deleted file mode 100644
index 0d3c0f5..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"xorl    (rFP,%ecx,4), rIBASE", "instr2":"xorl    4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86/op_xor_long_2addr.S
deleted file mode 100644
index b5000e4..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"xorl    %eax, (rFP,rINST,4)","instr2":"xorl    %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/other.S b/runtime/interpreter/mterp/x86/other.S
new file mode 100644
index 0000000..270ccb6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/other.S
@@ -0,0 +1,328 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@@BBBB */
+    .extern $helper
+    EXPORT_PC
+    movzwl  2(rPC), %eax                    # eax <- BBBB
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rINST, OUT_ARG1(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG2(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG3(%esp)
+    call    SYMBOL($helper)                 # (index, tgt_reg, shadow_frame, self)
+    RESTORE_IBASE
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # grab all 32 bits at once
+    SET_VREG %eax, rINST                    # vAA<- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
+    SET_VREG %ecx, rINST                    # vAA <- ssssBBBB
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    movsx   rINSTbl, %eax                   # eax <-ssssssBx
+    movl    $$0xf, rINST
+    andl    %eax, rINST                     # rINST <- A
+    sarl    $$4, %eax
+    SET_VREG %eax, rINST
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    $$16, %eax                      # eax <- BBBB0000
+    SET_VREG %eax, rINST                    # vAA <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, String@BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), %eax                    # eax <- BBBB
+    movl    %eax, OUT_ARG0(%esp)
+    movl    rINST, OUT_ARG1(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG2(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG3(%esp)
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    RESTORE_IBASE
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    movl    2(rPC), %eax                    # eax <- lsw
+    movzbl  rINSTbl, %ecx                   # ecx <- AA
+    movl    6(rPC), rINST                   # rINST <- msw
+    SET_VREG %eax, %ecx
+    SET_VREG_HIGH  rINST, %ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    movswl  2(rPC), %eax                    # eax <- ssssBBBB
+    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
+    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
+    movl    %ecx, rIBASE                    # restore rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # eax <- BBBBbbbb
+    movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
+    cltd                                    # rIBASE:eax <- ssssssssssssBBBB
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
+    movl    %ecx, rIBASE                    # restore rIBASE
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    $$16, %eax                      # eax <- BBBB0000
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
+    xorl    %eax, %eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_monitor_enter():
+/*
+ * Synchronize on an object.
+ */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    GET_VREG %ecx, rINST
+    movl    %ecx, OUT_ARG0(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG1(%esp)
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
+    RESTORE_IBASE
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC 1
+    movl    rSELF, %eax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+    jz      MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_monitor_exit():
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction.  See the Dalvik
+ * instruction spec.
+ */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    GET_VREG %ecx, rINST
+    movl    %ecx, OUT_ARG0(%esp)
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG1(%esp)
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
+    RESTORE_IBASE
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC 1
+    movl    rSELF, %eax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+    jz      MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movzbl  rINSTbl, %eax                   # eax <- BA
+    andb    $$0xf, %al                      # eax <- A
+    shrl    $$4, rINST                      # rINST <- B
+    GET_VREG rINST, rINST
+    .if $is_object
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
+    .else
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwl  4(rPC), %ecx                    # ecx <- BBBB
+    movzwl  2(rPC), %eax                    # eax <- AAAA
+    GET_VREG rINST, %ecx
+    .if $is_object
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
+    .else
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_exception():
+    /* move-exception vAA */
+    movl    rSELF, %ecx
+    movl    THREAD_EXCEPTION_OFFSET(%ecx), %eax
+    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- exception object
+    movl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzx   rINSTbl, %eax                   # eax <- AA
+    movw    2(rPC), rINSTw                  # rINSTw <- BBBB
+    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
+    .if $is_object
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
+    .else
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
+    movl    (%eax), %eax                    # r0 <- result.i.
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
+    .else
+    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* move-result-wide vAA */
+    movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
+    movl    4(%eax), %ecx                   # Get high
+    movl    (%eax), %eax                    # Get low
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[AA+1] <- ecx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzbl  rINSTbl, %ecx                   # ecx <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %ecx            # v[A] <- xmm0
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  4(rPC), %ecx                    # ecx<- BBBB
+    movzwl  2(rPC), %eax                    # eax<- AAAA
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  2(rPC), %ecx                    # ecx <- BBBB
+    movzbl  rINSTbl, %eax                   # eax <- AAAA
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_nop():
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/x86/shop2addr.S b/runtime/interpreter/mterp/x86/shop2addr.S
deleted file mode 100644
index 96c9954..0000000
--- a/runtime/interpreter/mterp/x86/shop2addr.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movzx   rINSTbl, %ecx                   # eax <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %ecx                     # eax <- vBB
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax, rINST                    # eax <- vAA
-    $instr                                  # ex: sarl %cl, %eax
-    SET_VREG $result, rINST
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/sseBinop.S b/runtime/interpreter/mterp/x86/sseBinop.S
deleted file mode 100644
index 63a1e21..0000000
--- a/runtime/interpreter/mterp/x86/sseBinop.S
+++ /dev/null
@@ -1,9 +0,0 @@
-%default {"instr":"","suff":""}
-    movzbl  2(rPC), %ecx                    # ecx <- BB
-    movzbl  3(rPC), %eax                    # eax <- CC
-    movs${suff}   VREG_ADDRESS(%ecx), %xmm0  # %xmm0 <- 1st src
-    ${instr}${suff} VREG_ADDRESS(%eax), %xmm0
-    movs${suff}   %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movs${suff}   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/sseBinop2Addr.S b/runtime/interpreter/mterp/x86/sseBinop2Addr.S
deleted file mode 100644
index d157e67..0000000
--- a/runtime/interpreter/mterp/x86/sseBinop2Addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
-%default {"instr":"","suff":""}
-    movzx   rINSTbl, %ecx                   # ecx <- A+
-    andl    $$0xf, %ecx                     # ecx <- A
-    movs${suff} VREG_ADDRESS(%ecx), %xmm0      # %xmm0 <- 1st src
-    sarl    $$4, rINST                      # rINST<- B
-    ${instr}${suff} VREG_ADDRESS(rINST), %xmm0
-    movs${suff} %xmm0, VREG_ADDRESS(%ecx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movs${suff} %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unop.S b/runtime/interpreter/mterp/x86/unop.S
deleted file mode 100644
index db09fc0..0000000
--- a/runtime/interpreter/mterp/x86/unop.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"instr":""}
-/*
- * Generic 32-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx <- A+
-    sarl    $$4,rINST                       # rINST <- B
-    GET_VREG %eax, rINST                    # eax <- vB
-    andb    $$0xf,%cl                       # ecx <- A
-    $instr
-    SET_VREG %eax, %ecx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unused.S b/runtime/interpreter/mterp/x86/unused.S
deleted file mode 100644
index c95ef94..0000000
--- a/runtime/interpreter/mterp/x86/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
diff --git a/runtime/interpreter/mterp/x86/zcmp.S b/runtime/interpreter/mterp/x86/zcmp.S
deleted file mode 100644
index c116159..0000000
--- a/runtime/interpreter/mterp/x86/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $$0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    j${revcmp}   1f
-    movswl  2(rPC), rINST                   # fetch signed displacement
-    testl   rINST, rINST
-    jmp     MterpCommonTakenBranch
-1:
-    cmpw    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/alt_stub.S b/runtime/interpreter/mterp/x86_64/alt_stub.S
deleted file mode 100644
index 24cd1a8..0000000
--- a/runtime/interpreter/mterp/x86_64/alt_stub.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler.  Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
-    .extern MterpCheckBefore
-    REFRESH_IBASE
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame, dex_pc_ptr)
-    jmp     .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
new file mode 100644
index 0000000..ff64b53
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -0,0 +1,575 @@
+%def bindiv(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if $wide
+    GET_WIDE_VREG %rax, %rax                # eax <- vBB
+    GET_WIDE_VREG $second, %rcx             # ecx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG $second, %rcx                  # ecx <- vCC
+    .endif
+    test${suffix}   $second, $second
+    jz      common_errDivideByZero
+    cmp${suffix}  $$-1, $second
+    je      2f
+    $ext                                    # rdx:rax <- sign-extended of rax
+    idiv${suffix}   $second
+1:
+    .if $wide
+    SET_WIDE_VREG $result, rINSTq           # eax <- vBB
+    .else
+    SET_VREG $result, rINSTq                # eax <- vBB
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xor${suffix} $result, $result
+    .else
+    neg${suffix} $result
+    .endif
+    jmp     1b
+
+%def bindiv2addr(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $$4, %ecx                       # rcx <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $wide
+    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
+    GET_WIDE_VREG $second, %rcx             # ecx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG $second, %rcx                  # ecx <- vB
+    .endif
+    test${suffix}   $second, $second
+    jz      common_errDivideByZero
+    cmp${suffix}  $$-1, $second
+    je      2f
+    $ext                                    # rdx:rax <- sign-extended of rax
+    idiv${suffix}   $second
+1:
+    .if $wide
+    SET_WIDE_VREG $result, rINSTq           # vA <- result
+    .else
+    SET_VREG $result, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if $rem
+    xor${suffix} $result, $result
+    .else
+    neg${suffix} $result
+    .endif
+    jmp     1b
+
+%def bindivLit16(result="", rem="0"):
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG $result, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xorl    $result, $result
+    .else
+    negl    $result
+    .endif
+    jmp     1b
+
+%def bindivLit8(result="", rem="0"):
+/*
+ * 32-bit div/rem "lit8" binary operation.  Handles special case of
+ * op0=minint & op1=-1
+ */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG  %eax, %rax                    # eax <- rBB
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG $result, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xorl    $result, $result
+    .else
+    negl    $result
+    .endif
+    jmp     1b
+
+%def binop(result="%eax", instr=""):
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    $instr VREG_ADDRESS(%rcx),%eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop1(wide="0", instr=""):
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # eax <- vCC
+    .if $wide
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    $instr                                  # ex: addl    %ecx,%eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    $instr                                  # ex: addl    %ecx,%eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    $instr %eax, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def binopLit16(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    $instr                                  # for example: addl %ecx, %eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopLit8(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    $instr                                  # ex: addl %ecx,%eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide(instr=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    $instr VREG_ADDRESS(%rcx),%rax
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide2addr(instr=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    $instr %rax,VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def cvtfp_int(fp_suffix="", i_suffix="", max_const="", result_reg="", wide=""):
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a nan, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG_XMM${fp_suffix} %xmm0, rINSTq
+    mov${i_suffix}  ${max_const}, ${result_reg}
+    cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
+    comis${fp_suffix}    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvtts${fp_suffix}2si${i_suffix}  %xmm0, ${result_reg}
+    jmp     1f
+2:
+    xor${i_suffix}    ${result_reg}, ${result_reg}
+1:
+    .if $wide
+    SET_WIDE_VREG ${result_reg}, %rcx
+    .else
+    SET_VREG ${result_reg}, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def shop2addr(wide="0", instr=""):
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $wide
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    $instr                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    $instr                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def unop(preinstr="", instr="", wide="0"):
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4,rINST                       # rINST <- B
+    .if ${wide}
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $$0xf,%cl                       # ecx <- A
+$preinstr
+$instr
+    .if ${wide}
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_int():
+%  binop(instr="addl")
+
+%def op_add_int_2addr():
+%  binop2addr(instr="addl")
+
+%def op_add_int_lit16():
+%  binopLit16(instr="addl    %ecx, %eax")
+
+%def op_add_int_lit8():
+%  binopLit8(instr="addl    %ecx, %eax")
+
+%def op_add_long():
+%  binopWide(instr="addq")
+
+%def op_add_long_2addr():
+%  binopWide2addr(instr="addq")
+
+%def op_and_int():
+%  binop(instr="andl")
+
+%def op_and_int_2addr():
+%  binop2addr(instr="andl")
+
+%def op_and_int_lit16():
+%  binopLit16(instr="andl    %ecx, %eax")
+
+%def op_and_int_lit8():
+%  binopLit8(instr="andl    %ecx, %eax")
+
+%def op_and_long():
+%  binopWide(instr="andq")
+
+%def op_and_long_2addr():
+%  binopWide2addr(instr="andq")
+
+%def op_cmp_long():
+/*
+ * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+    /* cmp-long vAA, vBB, vCC */
+    movzbq  2(rPC), %rdx                    # edx <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
+    xorl    %eax, %eax
+    xorl    %edi, %edi
+    addb    $$1, %al
+    movl    $$-1, %esi
+    cmpq    VREG_ADDRESS(%rcx), %rdx
+    cmovl   %esi, %edi
+    cmovg   %eax, %edi
+    SET_VREG %edi, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_div_int():
+%  bindiv(result="%eax", second="%ecx", wide="0", suffix="l")
+
+%def op_div_int_2addr():
+%  bindiv2addr(result="%eax", second="%ecx", wide="0", suffix="l")
+
+%def op_div_int_lit16():
+%  bindivLit16(result="%eax")
+
+%def op_div_int_lit8():
+%  bindivLit8(result="%eax")
+
+%def op_div_long():
+%  bindiv(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+
+%def op_div_long_2addr():
+%  bindiv2addr(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+
+%def op_int_to_byte():
+%  unop(instr="movsbl  %al, %eax")
+
+%def op_int_to_char():
+%  unop(instr="movzwl  %ax,%eax")
+
+%def op_int_to_long():
+    /* int to long vA, vB */
+    movzbq  rINSTbl, %rax                   # rax <- +A
+    sarl    $$4, %eax                       # eax <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    movslq  VREG_ADDRESS(%rax), %rax
+    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_int_to_short():
+%  unop(instr="movswl %ax, %eax")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%  op_move()
+
+%def op_mul_int():
+%  binop(instr="imull")
+
+%def op_mul_int_2addr():
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    imull   (rFP,rINSTq,4), %eax
+    SET_VREG %eax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_mul_int_lit16():
+%  binopLit16(instr="imull   %ecx, %eax")
+
+%def op_mul_int_lit8():
+%  binopLit8(instr="imull   %ecx, %eax")
+
+%def op_mul_long():
+%  binopWide(instr="imulq")
+
+%def op_mul_long_2addr():
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, %rcx                # rax <- vA
+    imulq   (rFP,rINSTq,4), %rax
+    SET_WIDE_VREG %rax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_neg_int():
+%  unop(instr="    negl    %eax")
+
+%def op_neg_long():
+%  unop(instr="    negq    %rax", wide="1")
+
+%def op_not_int():
+%  unop(instr="    notl    %eax")
+
+%def op_not_long():
+%  unop(instr="    notq    %rax", wide="1")
+
+%def op_or_int():
+%  binop(instr="orl")
+
+%def op_or_int_2addr():
+%  binop2addr(instr="orl")
+
+%def op_or_int_lit16():
+%  binopLit16(instr="orl     %ecx, %eax")
+
+%def op_or_int_lit8():
+%  binopLit8(instr="orl     %ecx, %eax")
+
+%def op_or_long():
+%  binopWide(instr="orq")
+
+%def op_or_long_2addr():
+%  binopWide2addr(instr="orq")
+
+%def op_rem_int():
+%  bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+
+%def op_rem_int_2addr():
+%  bindiv2addr(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+
+%def op_rem_int_lit16():
+%  bindivLit16(result="%edx", rem="1")
+
+%def op_rem_int_lit8():
+%  bindivLit8(result="%edx", rem="1")
+
+%def op_rem_long():
+%  bindiv(result="%rdx", second="%rcx", wide="1", suffix="q", ext="cqo", rem="1")
+
+%def op_rem_long_2addr():
+%  bindiv2addr(result="%rdx", second="%rcx", wide="1", suffix="q", rem="1", ext="cqo")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%  binopLit16(instr="subl    %eax, %ecx", result="%ecx")
+
+%def op_rsub_int_lit8():
+%  binopLit8(instr="subl    %eax, %ecx", result="%ecx")
+
+%def op_shl_int():
+%  binop1(instr="sall    %cl, %eax")
+
+%def op_shl_int_2addr():
+%  shop2addr(instr="sall    %cl, %eax")
+
+%def op_shl_int_lit8():
+%  binopLit8(instr="sall    %cl, %eax")
+
+%def op_shl_long():
+%  binop1(instr="salq    %cl, %rax", wide="1")
+
+%def op_shl_long_2addr():
+%  shop2addr(instr="salq    %cl, %rax", wide="1")
+
+%def op_shr_int():
+%  binop1(instr="sarl    %cl, %eax")
+
+%def op_shr_int_2addr():
+%  shop2addr(instr="sarl    %cl, %eax")
+
+%def op_shr_int_lit8():
+%  binopLit8(instr="sarl    %cl, %eax")
+
+%def op_shr_long():
+%  binop1(instr="sarq    %cl, %rax", wide="1")
+
+%def op_shr_long_2addr():
+%  shop2addr(instr="sarq    %cl, %rax", wide="1")
+
+%def op_sub_int():
+%  binop(instr="subl")
+
+%def op_sub_int_2addr():
+%  binop2addr(instr="subl")
+
+%def op_sub_long():
+%  binopWide(instr="subq")
+
+%def op_sub_long_2addr():
+%  binopWide2addr(instr="subq")
+
+%def op_ushr_int():
+%  binop1(instr="shrl    %cl, %eax")
+
+%def op_ushr_int_2addr():
+%  shop2addr(instr="shrl    %cl, %eax")
+
+%def op_ushr_int_lit8():
+%  binopLit8(instr="shrl    %cl, %eax")
+
+%def op_ushr_long():
+%  binop1(instr="shrq    %cl, %rax", wide="1")
+
+%def op_ushr_long_2addr():
+%  shop2addr(instr="shrq    %cl, %rax", wide="1")
+
+%def op_xor_int():
+%  binop(instr="xorl")
+
+%def op_xor_int_2addr():
+%  binop2addr(instr="xorl")
+
+%def op_xor_int_lit16():
+%  binopLit16(instr="xorl    %ecx, %eax")
+
+%def op_xor_int_lit8():
+%  binopLit8(instr="xorl    %ecx, %eax")
+
+%def op_xor_long():
+%  binopWide(instr="xorq")
+
+%def op_xor_long_2addr():
+%  binopWide2addr(instr="xorq")
diff --git a/runtime/interpreter/mterp/x86_64/array.S b/runtime/interpreter/mterp/x86_64/array.S
new file mode 100644
index 0000000..e49c097
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/array.S
@@ -0,0 +1,178 @@
+%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    movq    $data_offset(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    $load   $data_offset(%rax,%rcx,$shift), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_boolean():
+%  op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+%  op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+%  op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+/*
+ * Array object get.  vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG OUT_32_ARG0, %rax              # OUT_32_ARG0 <- vBB (array object)
+    GET_VREG OUT_32_ARG1, %rcx              # OUT_32_ARG1 <- vCC (requested index)
+    EXPORT_PC
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
+    movq    rSELF, %rcx
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+    jnz     MterpException
+    SET_VREG_OBJECT %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_short():
+%  op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+%  op_aget(load="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
+
+%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    $store    $reg, $data_offset(%rax,%rcx,$shift)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_boolean():
+%  op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+%  op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+%  op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+/*
+ * Store an object into an array.  vBB[vCC] <- vAA.
+ */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, dex_pc_ptr, inst_data)
+    testb   %al, %al
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_short():
+%  op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+%  op_aput(reg="rINSTq", store="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
+
+%def op_array_length():
+/*
+ * Return the length of an array.
+ */
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
+    testl   %ecx, %ecx                      # is null?
+    je      common_errNullObject
+    andb    $$0xf, %al                      # eax <- A
+    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+    SET_VREG rINST, %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    movslq  2(rPC), %rcx                    # rcx <- ssssssssBBBBbbbb
+    leaq    (rPC,%rcx,2), OUT_ARG1          # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
+    GET_VREG OUT_32_ARG0, rINSTq            # OUT_ARG0 <- vAA (array object)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern $helper
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL($helper)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpNewArray)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S
deleted file mode 100644
index 6601483..0000000
--- a/runtime/interpreter/mterp/x86_64/bincmp.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
-    /* if-cmp vA, vB, +CCCC */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # rcx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    j${revcmp}   1f
-    movswq  2(rPC), rINSTq                  # Get signed branch offset
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/bindiv.S b/runtime/interpreter/mterp/x86_64/bindiv.S
deleted file mode 100644
index e10d1dc..0000000
--- a/runtime/interpreter/mterp/x86_64/bindiv.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    .if $wide
-    GET_WIDE_VREG %rax, %rax                # eax <- vBB
-    GET_WIDE_VREG $second, %rcx             # ecx <- vCC
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    GET_VREG $second, %rcx                  # ecx <- vCC
-    .endif
-    test${suffix}   $second, $second
-    jz      common_errDivideByZero
-    cmp${suffix}  $$-1, $second
-    je      2f
-    $ext                                    # rdx:rax <- sign-extended of rax
-    idiv${suffix}   $second
-1:
-    .if $wide
-    SET_WIDE_VREG $result, rINSTq           # eax <- vBB
-    .else
-    SET_VREG $result, rINSTq                # eax <- vBB
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if $rem
-    xor${suffix} $result, $result
-    .else
-    neg${suffix} $result
-    .endif
-    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/bindiv2addr.S b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
deleted file mode 100644
index 8b9bc95..0000000
--- a/runtime/interpreter/mterp/x86_64/bindiv2addr.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $$4, %ecx                       # rcx <- B
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    .if $wide
-    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
-    GET_WIDE_VREG $second, %rcx             # ecx <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vA
-    GET_VREG $second, %rcx                  # ecx <- vB
-    .endif
-    test${suffix}   $second, $second
-    jz      common_errDivideByZero
-    cmp${suffix}  $$-1, $second
-    je      2f
-    $ext                                    # rdx:rax <- sign-extended of rax
-    idiv${suffix}   $second
-1:
-    .if $wide
-    SET_WIDE_VREG $result, rINSTq           # vA <- result
-    .else
-    SET_VREG $result, rINSTq                # vA <- result
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
-    .if $rem
-    xor${suffix} $result, $result
-    .else
-    neg${suffix} $result
-    .endif
-    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit16.S b/runtime/interpreter/mterp/x86_64/bindivLit16.S
deleted file mode 100644
index 80dbce2..0000000
--- a/runtime/interpreter/mterp/x86_64/bindivLit16.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {"result":"","rem":"0"}
-/*
- * 32-bit binary div/rem operation.  Handles special case of op1=-1.
- */
-    /* div/rem/lit16 vA, vB, #+CCCC */
-    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    testl   %ecx, %ecx
-    jz      common_errDivideByZero
-    cmpl    $$-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG $result, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if $rem
-    xorl    $result, $result
-    .else
-    negl    $result
-    .endif
-    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit8.S b/runtime/interpreter/mterp/x86_64/bindivLit8.S
deleted file mode 100644
index ab535f3..0000000
--- a/runtime/interpreter/mterp/x86_64/bindivLit8.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default {"result":"","rem":"0"}
-/*
- * 32-bit div/rem "lit8" binary operation.  Handles special case of
- * op0=minint & op1=-1
- */
-    /* div/rem/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax, %rax                    # eax <- rBB
-    testl   %ecx, %ecx
-    je      common_errDivideByZero
-    cmpl    $$-1, %ecx
-    je      2f
-    cdq                                     # rax <- sign-extended of eax
-    idivl   %ecx
-1:
-    SET_VREG $result, rINSTq                # vA <- result
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
-    .if $rem
-    xorl    $result, $result
-    .else
-    negl    $result
-    .endif
-    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/binop.S b/runtime/interpreter/mterp/x86_64/binop.S
deleted file mode 100644
index 962dd61..0000000
--- a/runtime/interpreter/mterp/x86_64/binop.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit binary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- *      xor-int, shl-int, shr-int, ushr-int
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB
-    $instr                                  # ex: addl    (rFP,%rcx,4),%eax
-    SET_VREG $result, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop1.S b/runtime/interpreter/mterp/x86_64/binop1.S
deleted file mode 100644
index bdd5732..0000000
--- a/runtime/interpreter/mterp/x86_64/binop1.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"0"}
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %ecx, %rcx                     # eax <- vCC
-    .if $wide
-    GET_WIDE_VREG %rax, %rax                # rax <- vBB
-    $instr                                  # ex: addl    %ecx,%eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, %rax                     # eax <- vBB
-    $instr                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop2addr.S b/runtime/interpreter/mterp/x86_64/binop2addr.S
deleted file mode 100644
index 4448a81..0000000
--- a/runtime/interpreter/mterp/x86_64/binop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    $instr                                  # for ex: addl   %eax,(rFP,%ecx,4)
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/binopLit16.S b/runtime/interpreter/mterp/x86_64/binopLit16.S
deleted file mode 100644
index de43b53..0000000
--- a/runtime/interpreter/mterp/x86_64/binopLit16.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- *      and-int/lit16, or-int/lit16, xor-int/lit16
- */
-    /* binop/lit16 vA, vB, #+CCCC */
-    movl    rINST, %eax                     # rax <- 000000BA
-    sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax, %rax                     # eax <- vB
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
-    $instr                                  # for example: addl %ecx, %eax
-    SET_VREG $result, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopLit8.S b/runtime/interpreter/mterp/x86_64/binopLit8.S
deleted file mode 100644
index 995002b..0000000
--- a/runtime/interpreter/mterp/x86_64/binopLit8.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call.  (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- *      and-int/lit8, or-int/lit8, xor-int/lit8,
- *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
-    /* binop/lit8 vAA, vBB, #+CC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
-    GET_VREG %eax, %rax                     # eax <- rBB
-    $instr                                  # ex: addl %ecx,%eax
-    SET_VREG $result, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide.S b/runtime/interpreter/mterp/x86_64/binopWide.S
deleted file mode 100644
index f92f18e..0000000
--- a/runtime/interpreter/mterp/x86_64/binopWide.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
-    $instr                                  # ex: addq   (rFP,%rcx,4),%rax
-    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide2addr.S b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
deleted file mode 100644
index d9e6cfb..0000000
--- a/runtime/interpreter/mterp/x86_64/binopWide2addr.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
-    /* binop/2addr vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    $instr                                  # for ex: addq   %rax,(rFP,%rcx,4)
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/const.S b/runtime/interpreter/mterp/x86_64/const.S
deleted file mode 100644
index 1ddf20f..0000000
--- a/runtime/interpreter/mterp/x86_64/const.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL($helper)                 # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/control_flow.S b/runtime/interpreter/mterp/x86_64/control_flow.S
new file mode 100644
index 0000000..2f3b5e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/control_flow.S
@@ -0,0 +1,206 @@
+%def bincmp(revcmp=""):
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+    testq   rINSTq, rINSTq
+    jmp     MterpCommonTakenBranch
+1:
+    cmpl    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_check_not_taken_osr
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def zcmp(revcmp=""):
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $$0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+    testq   rINSTq, rINSTq
+    jmp     MterpCommonTakenBranch
+1:
+    cmpl    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_check_not_taken_osr
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_goto():
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto +AA */
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    testq   rINSTq, rINSTq
+    jmp     MterpCommonTakenBranch
+
+%def op_goto_16():
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto/16 +AAAA */
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    testq   rINSTq, rINSTq
+    jmp     MterpCommonTakenBranch
+
+%def op_goto_32():
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ *  The testq below sets the sign flag on the branch offset for
+ * MterpCommonTakenBranch (the "adds" wording was an ARM leftover).
+ */
+    /* goto/32 +AAAAAAAA */
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    testq   rINSTq, rINSTq
+    jmp     MterpCommonTakenBranch
+
+%def op_if_eq():
+%  bincmp(revcmp="ne")
+
+%def op_if_eqz():
+%  zcmp(revcmp="ne")
+
+%def op_if_ge():
+%  bincmp(revcmp="l")
+
+%def op_if_gez():
+%  zcmp(revcmp="l")
+
+%def op_if_gt():
+%  bincmp(revcmp="le")
+
+%def op_if_gtz():
+%  zcmp(revcmp="le")
+
+%def op_if_le():
+%  bincmp(revcmp="g")
+
+%def op_if_lez():
+%  zcmp(revcmp="g")
+
+%def op_if_lt():
+%  bincmp(revcmp="ge")
+
+%def op_if_ltz():
+%  zcmp(revcmp="ge")
+
+%def op_if_ne():
+%  bincmp(revcmp="e")
+
+%def op_if_nez():
+%  zcmp(revcmp="e")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # OUT_ARG0 <- ssssssssBBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # OUT_ARG0 <- PC + ssssssssBBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+    call    SYMBOL($func)
+    testl   %eax, %eax
+    movslq  %eax, rINSTq
+    jmp     MterpCommonTakenBranch
+
+%def op_return():
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movq    rSELF, OUT_ARG0
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    jz      1f
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    jmp     MterpReturn
+
+%def op_return_object():
+%  op_return()
+
+%def op_return_void():
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movq    rSELF, OUT_ARG0
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    jz      1f
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
+
+%def op_return_void_no_barrier():
+    movq    rSELF, OUT_ARG0
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    jz      1f
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
+
+%def op_return_wide():
+/*
+ * Return a 64-bit value.
+ */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    movq    rSELF, OUT_ARG0
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    jz      1f
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_WIDE_VREG %rax, rINSTq              # eax <- v[AA]
+    jmp     MterpReturn
+
+%def op_sparse_switch():
+%  op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+/*
+ * Throw an exception object in the current thread.
+ */
+    /* throw vAA */
+    EXPORT_PC
+    GET_VREG %eax, rINSTq                   # eax<- vAA (exception object)
+    testl   %eax, %eax                      # null check must cover all 32 ref bits
+    jz      common_errNullObject
+    movq    rSELF, %rcx
+    movq    %rax, THREAD_EXCEPTION_OFFSET(%rcx)
+    jmp     MterpException
diff --git a/runtime/interpreter/mterp/x86_64/cvtfp_int.S b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
deleted file mode 100644
index 1472bd2..0000000
--- a/runtime/interpreter/mterp/x86_64/cvtfp_int.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {"fp_suffix":"","i_suffix":"","max_const":"","result_reg":"","wide":""}
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint.  If it is less
- * than minint, it should be clamped to minint.  If it is a nan, the result
- * should be zero.  Further, the rounding mode is to truncate.
- */
-    /* float/double to int/long vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    movs${fp_suffix}   VREG_ADDRESS(rINSTq), %xmm0
-    mov${i_suffix}  ${max_const}, ${result_reg}
-    cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
-    comis${fp_suffix}    %xmm1, %xmm0
-    jae     1f
-    jp      2f
-    cvtts${fp_suffix}2si${i_suffix}  %xmm0, ${result_reg}
-    jmp     1f
-2:
-    xor${i_suffix}    ${result_reg}, ${result_reg}
-1:
-    .if $wide
-    SET_WIDE_VREG ${result_reg}, %rcx
-    .else
-    SET_VREG ${result_reg}, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
deleted file mode 100644
index b08419b..0000000
--- a/runtime/interpreter/mterp/x86_64/entry.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
-    .global SYMBOL(ExecuteMterpImpl)
-    FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- *  0  Thread* self
- *  1  insns_
- *  2  ShadowFrame
- *  3  JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
-    .cfi_startproc
-    .cfi_def_cfa rsp, 8
-
-    /* Spill callee save regs */
-    PUSH %rbx
-    PUSH %rbp
-    PUSH %r12
-    PUSH %r13
-    PUSH %r14
-    PUSH %r15
-
-    /* Allocate frame */
-    subq    $$FRAME_SIZE, %rsp
-    .cfi_adjust_cfa_offset FRAME_SIZE
-
-    /* Remember the return register */
-    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
-    /* Remember the code_item */
-    movq    IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
-    /* set up "named" registers */
-    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
-    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
-    leaq    (rFP, %rax, 4), rREFS
-    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
-    leaq    (IN_ARG1, %rax, 2), rPC
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    movq    IN_ARG0, rSELF
-    REFRESH_IBASE_REG IN_ARG0
-
-    /* Set up for backwards branches & osr profiling */
-    movq    IN_ARG0, OUT_ARG2  /* Set up OUT_ARG2 before clobbering IN_ARG0 */
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpSetUpHotnessCountdown)
-    movswl  %ax, rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86_64/fallback.S b/runtime/interpreter/mterp/x86_64/fallback.S
deleted file mode 100644
index 8d61166..0000000
--- a/runtime/interpreter/mterp/x86_64/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
-    jmp     MterpFallback
-
diff --git a/runtime/interpreter/mterp/x86_64/field.S b/runtime/interpreter/mterp/x86_64/field.S
deleted file mode 100644
index f8b0588..0000000
--- a/runtime/interpreter/mterp/x86_64/field.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { }
-    /*
-     * General field read / write (iget-* iput-* sget-* sput-*).
-     */
-    .extern $helper
-    REFRESH_INST ${opnum}                      # fix rINST to include opcode
-    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
-    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
-    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
-    call    SYMBOL($helper)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64/floating_point.S
new file mode 100644
index 0000000..7fcb742
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/floating_point.S
@@ -0,0 +1,236 @@
+%def fpcmp(suff="d", nanval="pos"):
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx<- CC
+    movzbq  2(rPC), %rax                    # eax<- BB
+    GET_VREG_XMM${suff} %xmm0, %rax
+    xor     %eax, %eax
+    ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
+    jp      .L${opcode}_nan_is_${nanval}
+    je      .L${opcode}_finish
+    jb      .L${opcode}_less
+.L${opcode}_nan_is_pos:
+    addb    $$1, %al
+    jmp     .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+    movl    $$-1, %eax
+.L${opcode}_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def fpcvt(source_suffix="", dest_suffix="", wide=""):
+/*
+ * Generic FP conversion operation; handles 32- and 64-bit destinations (wide flag).
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    cvts${source_suffix}2s${dest_suffix}    VREG_ADDRESS(rINSTq), %xmm0
+    .if $wide
+    SET_VREG_XMMd %xmm0, %rcx
+    CLEAR_WIDE_REF %rcx
+    .else
+    SET_VREG_XMMs %xmm0, %rcx
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def sseBinop(instr="", suff=""):
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    GET_VREG_XMM${suff} %xmm0, %rcx         # %xmm0 <- 1st src
+    ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
+    SET_VREG_XMM${suff} %xmm0, rINSTq       # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff}   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def sseBinop2Addr(instr="", suff=""):
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $$0xf, %ecx                     # ecx <- A
+    GET_VREG_XMM${suff} %xmm0, %rcx         # %xmm0 <- 1st src
+    sarl    $$4, rINST                      # rINST<- B
+    ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
+    SET_VREG_XMM${suff} %xmm0, %rcx         # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_double():
+%  sseBinop(instr="adds", suff="d")
+
+%def op_add_double_2addr():
+%  sseBinop2Addr(instr="adds", suff="d")
+
+%def op_add_float():
+%  sseBinop(instr="adds", suff="s")
+
+%def op_add_float_2addr():
+%  sseBinop2Addr(instr="adds", suff="s")
+
+%def op_cmpg_double():
+%  fpcmp(suff="d", nanval="pos")
+
+%def op_cmpg_float():
+%  fpcmp(suff="s", nanval="pos")
+
+%def op_cmpl_double():
+%  fpcmp(suff="d", nanval="neg")
+
+%def op_cmpl_float():
+%  fpcmp(suff="s", nanval="neg")
+
+%def op_div_double():
+%  sseBinop(instr="divs", suff="d")
+
+%def op_div_double_2addr():
+%  sseBinop2Addr(instr="divs", suff="d")
+
+%def op_div_float():
+%  sseBinop(instr="divs", suff="s")
+
+%def op_div_float_2addr():
+%  sseBinop2Addr(instr="divs", suff="s")
+
+%def op_double_to_float():
+%  fpcvt(source_suffix="d", dest_suffix="s", wide="0")
+
+%def op_double_to_int():
+%  cvtfp_int(fp_suffix="d", i_suffix="l", max_const="$0x7fffffff", result_reg="%eax", wide="0")
+
+%def op_double_to_long():
+%  cvtfp_int(fp_suffix="d", i_suffix="q", max_const="$0x7fffffffffffffff", result_reg="%rax", wide="1")
+
+%def op_float_to_double():
+%  fpcvt(source_suffix="s", dest_suffix="d", wide="1")
+
+%def op_float_to_int():
+%  cvtfp_int(fp_suffix="s", i_suffix="l", max_const="$0x7fffffff", result_reg="%eax", wide="0")
+
+%def op_float_to_long():
+%  cvtfp_int(fp_suffix="s", i_suffix="q", max_const="$0x7fffffffffffffff", result_reg="%rax", wide="1")
+
+%def op_int_to_double():
+%  fpcvt(source_suffix="i", dest_suffix="dl", wide="1")
+
+%def op_int_to_float():
+%  fpcvt(source_suffix="i", dest_suffix="sl", wide="0")
+
+%def op_long_to_double():
+%  fpcvt(source_suffix="i", dest_suffix="dq", wide="1")
+
+%def op_long_to_float():
+%  fpcvt(source_suffix="i", dest_suffix="sq", wide="0")
+
+%def op_mul_double():
+%  sseBinop(instr="muls", suff="d")
+
+%def op_mul_double_2addr():
+%  sseBinop2Addr(instr="muls", suff="d")
+
+%def op_mul_float():
+%  sseBinop(instr="muls", suff="s")
+
+%def op_mul_float_2addr():
+%  sseBinop2Addr(instr="muls", suff="s")
+
+%def op_neg_double():
+%  unop(preinstr="    movq    $0x8000000000000000, %rsi", instr="    xorq    %rsi, %rax", wide="1")
+
+%def op_neg_float():
+%  unop(instr="    xorl    $0x80000000, %eax")
+
+%def op_rem_double():
+    /* rem_double vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    movzbq  2(rPC), %rax                    # eax <- BB
+    fldl    VREG_ADDRESS(%rcx)              # %st1 <- fp[vCC] (divisor)
+    fldl    VREG_ADDRESS(%rax)              # %st0 <- fp[vBB] (dividend)
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(rINSTq)            # fp[vAA] <- %st
+    CLEAR_WIDE_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_double_2addr():
+    /* rem_double/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    fldl    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    fldl    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_rem_float():
+    /* rem_float vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    movzbq  2(rPC), %rax                    # eax <- BB
+    flds    VREG_ADDRESS(%rcx)              # vCC to fp stack (divisor)
+    flds    VREG_ADDRESS(%rax)              # vBB to fp stack (dividend)
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(rINSTq)            # %st to vAA
+    CLEAR_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_float_2addr():
+    /* rem_float/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    flds    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    flds    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_double():
+%  sseBinop(instr="subs", suff="d")
+
+%def op_sub_double_2addr():
+%  sseBinop2Addr(instr="subs", suff="d")
+
+%def op_sub_float():
+%  sseBinop(instr="subs", suff="s")
+
+%def op_sub_float_2addr():
+%  sseBinop2Addr(instr="subs", suff="s")
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
deleted file mode 100644
index 3cc7532..0000000
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align  2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogDivideByZeroException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogArrayIndexException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNegativeArraySizeException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNoSuchMethodException)
-#endif
-    jmp     MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogNullObjectException)
-#endif
-    jmp     MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogExceptionThrownException)
-#endif
-    jmp     MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
-    call    SYMBOL(MterpLogSuspendFallback)
-#endif
-    jmp     MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    movq    rSELF, %rcx
-    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jz      MterpFallback
-    /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpHandleException)
-    testb   %al, %al
-    jz      MterpExceptionReturn
-    movq    OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
-    mov     OFF_FP_DEX_PC(rFP), %ecx
-    leaq    (%rax, %rcx, 2), rPC
-    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
-    /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    /* resume execution at catch block */
-    REFRESH_IBASE
-    FETCH_INST
-    GOTO_NEXT
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
-    jg      .L_forward_branch               # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    cmpl    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_osr_check
-    decl    rPROFILE
-    je      .L_add_batch                    # counted down to zero - report
-.L_resume_backward_branch:
-    movq    rSELF, %rax
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
-    REFRESH_IBASE_REG %rax
-    leaq    (rPC, rINSTq, 2), rPC
-    FETCH_INST
-    jnz     .L_suspend_request_pending
-    GOTO_NEXT
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    call    SYMBOL(MterpSuspendCheck)       # (self)
-    testb   %al, %al
-    jnz     MterpFallback
-    REFRESH_IBASE                           # might have changed during suspend
-    GOTO_NEXT
-
-.L_no_count_backwards:
-    cmpl    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    jne     .L_resume_backward_branch
-.L_osr_check:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_backward_branch
-    jmp     MterpOnStackReplacement
-
-.L_forward_branch:
-    cmpl    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
-    je      .L_check_osr_forward
-.L_resume_forward_branch:
-    leaq    (rPC, rINSTq, 2), rPC
-    FETCH_INST
-    GOTO_NEXT
-
-.L_check_osr_forward:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jz      .L_resume_forward_branch
-    jmp     MterpOnStackReplacement
-
-.L_add_batch:
-    movl    rPROFILE, %eax
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movswl  %ax, rPROFILE
-    jmp     .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    $$2, OUT_32_ARG2
-    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    testb   %al, %al
-    jnz     MterpOnStackReplacement
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movl    rINST, OUT_32_ARG2
-    call    SYMBOL(MterpLogOSR)
-#endif
-    movl    $$1, %eax
-    jmp     MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    call    SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    xorl    %eax, %eax
-    jmp     MterpDone
-
-/*
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    movl    $$1, %eax
-    jmp     MterpDone
-MterpReturn:
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
-    movq    %rax, (%rdx)
-    movl    $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    testl   rPROFILE, rPROFILE
-    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
-
-    movl    %eax, rINST                     # stash return value
-    /* Report cached hotness counts */
-    movl    rPROFILE, %eax
-    movq    OFF_FP_METHOD(rFP), OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
-    movl    rINST, %eax                     # restore return value
-
-    /* pop up frame */
-MRestoreFrame:
-    addq    $$FRAME_SIZE, %rsp
-    .cfi_adjust_cfa_offset -FRAME_SIZE
-
-    /* Restore callee save register */
-    POP %r15
-    POP %r14
-    POP %r13
-    POP %r12
-    POP %rbp
-    POP %rbx
-    ret
-    .cfi_endproc
-    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86_64/fpcmp.S b/runtime/interpreter/mterp/x86_64/fpcmp.S
deleted file mode 100644
index 806bc2b..0000000
--- a/runtime/interpreter/mterp/x86_64/fpcmp.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"suff":"d","nanval":"pos"}
-/*
- * Compare two floating-point values.  Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- *     if (x == y) {
- *         return 0;
- *     } else if (x < y) {
- *         return -1;
- *     } else if (x > y) {
- *         return 1;
- *     } else {
- *         return nanval ? 1 : -1;
- *     }
- * }
- */
-    /* op vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx<- CC
-    movzbq  2(rPC), %rax                    # eax<- BB
-    movs${suff} VREG_ADDRESS(%rax), %xmm0
-    xor     %eax, %eax
-    ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
-    jp      .L${opcode}_nan_is_${nanval}
-    je      .L${opcode}_finish
-    jb      .L${opcode}_less
-.L${opcode}_nan_is_pos:
-    addb    $$1, %al
-    jmp     .L${opcode}_finish
-.L${opcode}_nan_is_neg:
-.L${opcode}_less:
-    movl    $$-1, %eax
-.L${opcode}_finish:
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/fpcvt.S b/runtime/interpreter/mterp/x86_64/fpcvt.S
deleted file mode 100644
index 657869e..0000000
--- a/runtime/interpreter/mterp/x86_64/fpcvt.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"source_suffix":"","dest_suffix":"","wide":""}
-/*
- * Generic 32-bit FP conversion operation.
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    cvts${source_suffix}2s${dest_suffix}    VREG_ADDRESS(rINSTq), %xmm0
-    .if $wide
-    movsd   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_WIDE_REF %rcx
-    .else
-    movss   %xmm0, VREG_ADDRESS(%rcx)
-    CLEAR_REF %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
deleted file mode 100644
index 0332ce2..0000000
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
-   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
-   rbx, rbp, r12-r15
-Return regs:
-   32-bit in eax
-   64-bit in rax
-   fp on xmm0
-
-First 8 fp parameters came in xmm0-xmm7.
-First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters passed on stack, pushed right-to-left.  On entry to target, first
-param is at 8(%esp).  Traditional entry code is:
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
-  nick     reg   purpose
-  rPROFILE rbp   countdown register for jit profiling
-  rPC      r12   interpreted program counter, used for fetching instructions
-  rFP      r13   interpreted frame pointer, used for accessing locals and args
-  rINSTw   bx    first 16-bit code of current instruction
-  rINSTbl  bl    opcode portion of instruction word
-  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
-  rIBASE   r14   base of instruction handler table
-  rREFS    r15   base of object references in shadow frame.
-
-Notes:
-   o High order 16 bits of ebx must be zero on entry to handler
-   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
-   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations.  Each macro MUST emit only
-one instruction to make instruction-counting easier.  They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
-    #define MACRO_LITERAL(value) $$(value)
-    #define FUNCTION_TYPE(name)
-    #define OBJECT_TYPE(name)
-    #define SIZE(start,end)
-    // Mac OS' symbols have an _ prefix.
-    #define SYMBOL(name) _ ## name
-    #define ASM_HIDDEN .private_extern
-#else
-    #define MACRO_LITERAL(value) $$value
-    #define FUNCTION_TYPE(name) .type name, @function
-    #define OBJECT_TYPE(name) .type name, @object
-    #define SIZE(start,end) .size start, .-end
-    #define SYMBOL(name) name
-    #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
-    pushq \_reg
-    .cfi_adjust_cfa_offset 8
-    .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
-    popq \_reg
-    .cfi_adjust_cfa_offset -8
-    .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 8 bytes for return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE     8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        %rcx
-#define IN_ARG2        %rdx
-#define IN_ARG1        %rsi
-#define IN_ARG0        %rdi
-/* Spill offsets relative to %esp */
-#define SELF_SPILL     (FRAME_SIZE -  8)
-/* Out Args  */
-#define OUT_ARG3       %rcx
-#define OUT_ARG2       %rdx
-#define OUT_ARG1       %rsi
-#define OUT_ARG0       %rdi
-#define OUT_32_ARG3    %ecx
-#define OUT_32_ARG2    %edx
-#define OUT_32_ARG1    %esi
-#define OUT_32_ARG0    %edi
-#define OUT_FP_ARG1    %xmm1
-#define OUT_FP_ARG0    %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF    SELF_SPILL(%rsp)
-#define rPC      %r12
-#define CFI_DEX  12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP  5  // DWARF register number of the first argument register (rdi).
-#define rFP      %r13
-#define rINST    %ebx
-#define rINSTq   %rbx
-#define rINSTw   %bx
-#define rINSTbh  %bh
-#define rINSTbl  %bl
-#define rIBASE   %r14
-#define rREFS    %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
-    movq    THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
-    movq    rSELF, rIBASE
-    REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
-    movb    rINSTbl, rINSTbh
-    movb    $$\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
- */
-.macro FETCH_INST
-    movzwq  (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
-    movzx   rINSTbl,%eax
-    movzbl  rINSTbh,rINST
-    shll    MACRO_LITERAL(${handler_size_bits}), %eax
-    addq    rIBASE, %rax
-    jmp     *%rax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
-    leaq    2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
-    ADVANCE_PC \_count
-    FETCH_INST
-    GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
-    movl    (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
-    movq    (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
-    movq    \_reg, (rFP,\_vreg,4)
-    xorq    \_reg, \_reg
-    movq    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
-    movl    \_reg, (rFP,\_vreg,4)
-    movl    \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
-    movl    4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
-    movl    \_reg, 4(rFP,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
-    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
-    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end.S b/runtime/interpreter/mterp/x86_64/instruction_end.S
deleted file mode 100644
index 94587f8..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
-    .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S b/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
deleted file mode 100644
index 7757bce..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmAltInstructionEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
-    .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S b/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
deleted file mode 100644
index 8eb79ac..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmSisterEnd)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
-    .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start.S b/runtime/interpreter/mterp/x86_64/instruction_start.S
deleted file mode 100644
index 5d29a819..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
-    .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
-    .text
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S b/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
deleted file mode 100644
index 8dcf5bf..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmAltInstructionStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
-    .global SYMBOL(artMterpAsmAltInstructionStart)
-    .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S b/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
deleted file mode 100644
index 796e98b..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
-    OBJECT_TYPE(artMterpAsmSisterStart)
-    ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
-    .global SYMBOL(artMterpAsmSisterStart)
-    .text
-    .balign 4
-SYMBOL(artMterpAsmSisterStart):
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index f7e6155..15b48c9 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
 /*
  * Generic invoke handler wrapper.
  */
@@ -15,8 +15,101 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
+    movq    rSELF, %rax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+    jz      MterpFallback
     FETCH_INST
     GOTO_NEXT
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+    /*
+     * invoke-polymorphic handler wrapper.
+     */
+    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+    .extern $helper
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 4
+    movq    rSELF, %rax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+    jz      MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_invoke_custom():
+%  invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+%  invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+%  invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+%  invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+%  invoke(helper="MterpInvokeInterface")
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+%  invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+%  invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+%  invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+%  invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke(helper="MterpInvokeSuper")
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+%  invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+%  invoke(helper="MterpInvokeVirtual")
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+%  invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+%  invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
deleted file mode 100644
index 5157860..0000000
--- a/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC
-    movq    rSELF, OUT_ARG0
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
-    movq    rPC, OUT_ARG2
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_32_ARG3
-    call    SYMBOL($helper)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
-    testb   %al, %al
-    jnz     MterpFallback
-    FETCH_INST
-    GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
new file mode 100644
index 0000000..5900220
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -0,0 +1,759 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame.
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation.
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+   rbx, rbp, r12-r15
+Return regs:
+   32-bit in eax
+   64-bit in rax
+   fp on xmm0
+
+First 8 fp parameters came in xmm0-xmm7.
+First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
+Other parameters passed on stack, pushed right-to-left.  On entry to target, first
+param is at 8(%esp).  Traditional entry code is:
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be esp relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+  nick     reg   purpose
+  rPROFILE rbp   countdown register for jit profiling
+  rPC      r12   interpreted program counter, used for fetching instructions
+  rFP      r13   interpreted frame pointer, used for accessing locals and args
+  rINSTw   bx    first 16-bit code of current instruction
+  rINSTbl  bl    opcode portion of instruction word
+  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   r14   base of instruction handler table
+  rREFS    r15   base of object references in shadow frame.
+
+Notes:
+   o High order 16 bits of ebx must be zero on entry to handler
+   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define LITERAL(value) $$(value)
+
+/*
+ * Handle mac compiler specific
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $$(value)
+    #define FUNCTION_TYPE(name)
+    #define OBJECT_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+    #define ASM_HIDDEN .private_extern
+#else
+    #define MACRO_LITERAL(value) $$value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define OBJECT_TYPE(name) .type name, @object
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+    #define ASM_HIDDEN .hidden
+#endif
+
+.macro PUSH _reg
+    pushq \_reg
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popq \_reg
+    .cfi_adjust_cfa_offset -8
+    .cfi_restore \_reg
+.endm
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/* Frame size must be 16-byte aligned.
+ * Remember about 8 bytes for return address + 6 * 8 for spills.
+ */
+#define FRAME_SIZE     8
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3        %rcx
+#define IN_ARG2        %rdx
+#define IN_ARG1        %rsi
+#define IN_ARG0        %rdi
+/* Spill offsets relative to %esp */
+#define SELF_SPILL     (FRAME_SIZE -  8)
+/* Out Args  */
+#define OUT_ARG3       %rcx
+#define OUT_ARG2       %rdx
+#define OUT_ARG1       %rsi
+#define OUT_ARG0       %rdi
+#define OUT_32_ARG3    %ecx
+#define OUT_32_ARG2    %edx
+#define OUT_32_ARG1    %esi
+#define OUT_32_ARG0    %edi
+#define OUT_FP_ARG1    %xmm1
+#define OUT_FP_ARG0    %xmm0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF    SELF_SPILL(%rsp)
+#define rPC      %r12
+#define CFI_DEX  12 // DWARF register number of the register holding dex-pc (rPC).
+#define CFI_TMP  5  // DWARF register number of the first argument register (rdi).
+#define rFP      %r13
+#define rINST    %ebx
+#define rINSTq   %rbx
+#define rINSTw   %bx
+#define rINSTbh  %bh
+#define rINSTbl  %bl
+#define rIBASE   %r14
+#define rREFS    %r15
+#define rPROFILE %ebp
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For effiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ * IBase handles uses the caller save register so we must restore it after each call.
+ * Also it is used as a result of some 64-bit operations (like imul) and we should
+ * restore it in such cases also.
+ *
+ */
+.macro REFRESH_IBASE_REG self_reg
+    movq    THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
+.endm
+.macro REFRESH_IBASE
+    movq    rSELF, rIBASE
+    REFRESH_IBASE_REG rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * At enter to handler rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+    movb    rINSTbl, rINSTbh
+    movb    $$\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    movzwq  (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+    movzx   rINSTbl,%eax
+    movzbl  rINSTbh,rINST
+    shll    MACRO_LITERAL(${handler_size_bits}), %eax
+    addq    rIBASE, %rax
+    jmp     *%rax
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+    leaq    2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+    ADVANCE_PC \_count
+    FETCH_INST
+    GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+    movl    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+    movq    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+    movq    \_reg, VREG_ADDRESS(\_vreg)
+    xorq    \_reg, \_reg
+    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_XMMs _xmmreg _vreg
+    movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+    movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+    movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+    movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+    .text
+    ASM_HIDDEN SYMBOL(\name)
+    .global SYMBOL(\name)
+    FUNCTION_TYPE(\name)
+SYMBOL(\name):
+.endm
+
+.macro END name
+    SIZE(\name,\name)
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ *
+ * On entry:
+ *  0  Thread* self
+ *  1  insns_
+ *  2  ShadowFrame
+ *  3  JValue* result_register
+ *
+ */
+
+ENTRY ExecuteMterpImpl
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+
+    /* Spill callee save regs */
+    PUSH %rbx
+    PUSH %rbp
+    PUSH %r12
+    PUSH %r13
+    PUSH %r14
+    PUSH %r15
+
+    /* Allocate frame */
+    subq    $$FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset FRAME_SIZE
+
+    /* Remember the return register */
+    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+    /* Remember the code_item */
+    movq    IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
+
+    /* set up "named" registers */
+    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+    leaq    (rFP, %rax, 4), rREFS
+    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+    leaq    (IN_ARG1, %rax, 2), rPC
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+    EXPORT_PC
+
+    /* Starting ibase */
+    movq    IN_ARG0, rSELF
+    REFRESH_IBASE_REG IN_ARG0
+
+    /* Set up for backwards branches & osr profiling */
+    movq    IN_ARG0, OUT_ARG2  /* Set up OUT_ARG2 before clobbering IN_ARG0 */
+    movq    OFF_FP_METHOD(rFP), OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpSetUpHotnessCountdown)
+    movswl  %ax, rPROFILE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
+    // cfi info continues, and covers the whole mterp implementation.
+    END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+    // Call C++ to do debug checks and return to the handler using tail call.
+    .extern MterpCheckBefore
+    popq    %rax                     # Return address (the instuction handler).
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    pushq   %rax                     # Return address for the tail call.
+    jmp     SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+
+%def opcode_pre():
+%  add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+    #if !defined(NDEBUG)
+    call    SYMBOL(Mterp_dchecks_before_helper)
+    #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+%def helpers():
+    ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogDivideByZeroException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogArrayIndexException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNoSuchMethodException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNullObjectException)
+#endif
+    jmp     MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogExceptionThrownException)
+#endif
+    jmp     MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
+    call    SYMBOL(MterpLogSuspendFallback)
+#endif
+    jmp     MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    movq    rSELF, %rcx
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+    jz      MterpFallback
+    /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpHandleException)
+    testb   %al, %al
+    jz      MterpExceptionReturn
+    movq    OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
+    mov     OFF_FP_DEX_PC(rFP), %ecx
+    leaq    (%rax, %rcx, 2), rPC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    movq    rSELF, %rax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+    jz      MterpFallback
+    /* resume execution at catch block */
+    REFRESH_IBASE
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ *    rINST          <= signed offset
+ *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
+ *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ *    If profiling active, do hotness countdown and report if we hit zero.
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *    Is there a pending suspend request?  If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranch:
+    jg      .L_forward_branch               # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+#  error "JIT_CHECK_OSR must be -1."
+#endif
+    cmpl    $$JIT_CHECK_OSR, rPROFILE
+    je      .L_osr_check
+    decl    rPROFILE
+    je      .L_add_batch                    # counted down to zero - report
+.L_resume_backward_branch:
+    movq    rSELF, %rax
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+    REFRESH_IBASE_REG %rax
+    leaq    (rPC, rINSTq, 2), rPC
+    FETCH_INST
+    jnz     .L_suspend_request_pending
+    GOTO_NEXT
+
+.L_suspend_request_pending:
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)       # (self)
+    testb   %al, %al
+    jnz     MterpFallback
+    REFRESH_IBASE                           # might have changed during suspend
+    GOTO_NEXT
+
+.L_no_count_backwards:
+    cmpl    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
+    jne     .L_resume_backward_branch
+.L_osr_check:
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    jz      .L_resume_backward_branch
+    jmp     MterpOnStackReplacement
+
+.L_forward_branch:
+    cmpl    $$JIT_CHECK_OSR, rPROFILE         # possible OSR re-entry?
+    je      .L_check_osr_forward
+.L_resume_forward_branch:
+    leaq    (rPC, rINSTq, 2), rPC
+    FETCH_INST
+    GOTO_NEXT
+
+.L_check_osr_forward:
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    jz      .L_resume_forward_branch
+    jmp     MterpOnStackReplacement
+
+.L_add_batch:
+    movl    rPROFILE, %eax
+    movq    OFF_FP_METHOD(rFP), OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
+    movswl  %ax, rPROFILE
+    jmp     .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    $$2, OUT_32_ARG2
+    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $$1, %eax
+    jmp     MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    xorl    %eax, %eax
+    jmp     MterpDone
+
+/*
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    movl    $$1, %eax
+    jmp     MterpDone
+MterpReturn:
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
+    movq    %rax, (%rdx)
+    movl    $$1, %eax
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
+ * checking for OSR.  If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+    testl   rPROFILE, rPROFILE
+    jle     MRestoreFrame                   # if > 0, we may have some counts to report.
+
+    movl    %eax, rINST                     # stash return value
+    /* Report cached hotness counts */
+    movl    rPROFILE, %eax
+    movq    OFF_FP_METHOD(rFP), OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
+    movl    rINST, %eax                     # restore return value
+
+    /* pop up frame */
+MRestoreFrame:
+    addq    $$FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset -FRAME_SIZE
+
+    /* Restore callee save register */
+    POP %r15
+    POP %r14
+    POP %r13
+    POP %r12
+    POP %rbp
+    POP %rbx
+    ret
+    .cfi_endproc
+    END MterpHelpers
+
+%def instruction_end():
+
+    OBJECT_TYPE(artMterpAsmInstructionEnd)
+    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
+    .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
+%def instruction_start():
+
+    OBJECT_TYPE(artMterpAsmInstructionStart)
+    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
+    .global SYMBOL(artMterpAsmInstructionStart)
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+    .text
+
+%def opcode_start():
+    ENTRY Mterp_${opcode}
+%def opcode_end():
+    END Mterp_${opcode}
+%def helper_start(name):
+    ENTRY ${name}
+%def helper_end(name):
+    END ${name}
diff --git a/runtime/interpreter/mterp/x86_64/object.S b/runtime/interpreter/mterp/x86_64/object.S
new file mode 100644
index 0000000..fa85f69
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/object.S
@@ -0,0 +1,254 @@
+%def field(helper=""):
+    /*
+     * General field read / write (iget-* iput-* sget-* sput-*).
+     */
+    .extern $helper
+    REFRESH_INST ${opnum}                      # fix rINST to include opcode
+    movq    rPC, OUT_ARG0                      # arg0: Instruction* inst
+    movl    rINST, OUT_32_ARG1                 # arg1: uint16_t inst_data
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2  # arg2: ShadowFrame* sf
+    movq    rSELF, OUT_ARG3                    # arg3: Thread* self
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_check_cast():
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG1
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+%  field(helper=helper)
+
+%def op_iget_boolean():
+%  op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_byte():
+%  op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_char():
+%  op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="movzwl")
+
+%def op_iget_object():
+%  op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    .extern artIGetObjectFromMterp
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG0, %rcx              # vB (object we're operating on)
+    movzwl  2(rPC), OUT_32_ARG1             # eax <- field byte offset
+    EXPORT_PC
+    callq   SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
+    movq    rSELF, %rcx
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+    jnz     MterpException                  # bail out
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_quick(load="movl", wide="0"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf,rINSTbl                   # rINST <- A
+    .if $wide
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    ${load} (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_short():
+%  op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="movswl")
+
+%def op_iget_wide():
+%  op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+%  op_iget_quick(load="movswl", wide="1")
+
+%def op_instance_of():
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- CCCC
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, %eax                       # eax <- B
+    leaq    VREG_ADDRESS(%rax), OUT_ARG1    # Get object address
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
+    movsbl  %al, %eax
+    movq    rSELF, %rcx
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+    jnz     MterpException
+    andb    $$0xf, rINSTbl                  # rINSTbl <- A
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+%  field(helper=helper)
+
+%def op_iput_boolean():
+%  op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_byte():
+%  op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_char():
+%  op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_object():
+%  op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpIputObjectQuick)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_quick(reg="rINST", store="movl"):
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    ${store}    ${reg}, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_short():
+%  op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_wide():
+%  op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    movzbq    rINSTbl, %rcx                 # rcx<- BA
+    sarl      $$4, %ecx                     # ecx<- B
+    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
+    testl     %ecx, %ecx                    # is object null?
+    je        common_errNullObject
+    movzwq    2(rPC), %rax                  # rax<- field byte offset
+    leaq      (%rcx,%rax,1), %rcx           # ecx<- Address of 64-bit target
+    andb      $$0xf, rINSTbl                # rINST<- A
+    GET_WIDE_VREG %rax, rINSTq              # rax<- fp[A]/fp[A+1]
+    movq      %rax, (%rcx)                  # obj.field<- r0/r1
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_new_instance():
+/*
+ * Create a new instance of a class.
+ */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rSELF, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpNewInstance)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+%  field(helper=helper)
+
+%def op_sget_boolean():
+%  op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+%  op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+%  op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+%  op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+%  op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+%  op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+%  field(helper=helper)
+
+%def op_sput_boolean():
+%  op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+%  op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+%  op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+%  op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+%  op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double.S b/runtime/interpreter/mterp/x86_64/op_add_double.S
deleted file mode 100644
index cb462cb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
deleted file mode 100644
index 063bde3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float.S b/runtime/interpreter/mterp/x86_64/op_add_float.S
deleted file mode 100644
index 7753bf8..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
deleted file mode 100644
index 6c8005b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int.S b/runtime/interpreter/mterp/x86_64/op_add_int.S
deleted file mode 100644
index e316be7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"addl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
deleted file mode 100644
index 2ff8293..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"addl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
deleted file mode 100644
index bfeb7ca..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
deleted file mode 100644
index 8954844..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long.S b/runtime/interpreter/mterp/x86_64/op_add_long.S
deleted file mode 100644
index 89131ff..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"addq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
deleted file mode 100644
index fed98bc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"addq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aget.S b/runtime/interpreter/mterp/x86_64/op_aget.S
deleted file mode 100644
index 58d4948..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget.S
+++ /dev/null
@@ -1,24 +0,0 @@
-%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
-/*
- * Array get, 32 bits or less.  vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # eax <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if $wide
-    movq    $data_offset(%rax,%rcx,8), %rax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    $load   $data_offset(%rax,%rcx,$shift), %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
deleted file mode 100644
index cf7bdb5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_byte.S b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
deleted file mode 100644
index 1cbb569..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_char.S b/runtime/interpreter/mterp/x86_64/op_aget_char.S
deleted file mode 100644
index 45c9085..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_object.S b/runtime/interpreter/mterp/x86_64/op_aget_object.S
deleted file mode 100644
index 5f77a97..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_object.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Array object get.  vAA <- vBB[vCC].
- *
- * for: aget-object
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG OUT_32_ARG0, %rax              # eax <- vBB (array object)
-    GET_VREG OUT_32_ARG1, %rcx              # ecx <- vCC (requested index)
-    EXPORT_PC
-    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
-    movq    rSELF, %rcx
-    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException
-    SET_VREG_OBJECT %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_short.S b/runtime/interpreter/mterp/x86_64/op_aget_short.S
deleted file mode 100644
index 82c4a1d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_wide.S b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
deleted file mode 100644
index 4f2771b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int.S b/runtime/interpreter/mterp/x86_64/op_and_int.S
deleted file mode 100644
index 4469889..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"andl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
deleted file mode 100644
index 16315bb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"andl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
deleted file mode 100644
index 63e851b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
deleted file mode 100644
index da7a20f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long.S b/runtime/interpreter/mterp/x86_64/op_and_long.S
deleted file mode 100644
index ce1dd26..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"andq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
deleted file mode 100644
index d17ab8d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"andq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aput.S b/runtime/interpreter/mterp/x86_64/op_aput.S
deleted file mode 100644
index 11500ad..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
-/*
- * Array put, 32 bits or less.  vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
-    /* op vAA, vBB, vCC */
-    movzbq  2(rPC), %rax                    # rax <- BB
-    movzbq  3(rPC), %rcx                    # rcx <- CC
-    GET_VREG %eax, %rax                     # eax <- vBB (array object)
-    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
-    testl   %eax, %eax                      # null array object?
-    je      common_errNullObject            # bail if so
-    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
-    jae     common_errArrayIndex            # index >= length, bail.
-    .if $wide
-    GET_WIDE_VREG rINSTq, rINSTq
-    .else
-    GET_VREG rINST, rINSTq
-    .endif
-    $store    $reg, $data_offset(%rax,%rcx,$shift)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
deleted file mode 100644
index 7d77a86..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_byte.S b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
deleted file mode 100644
index 7a1723e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_char.S b/runtime/interpreter/mterp/x86_64/op_aput_char.S
deleted file mode 100644
index f8f50a3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_object.S b/runtime/interpreter/mterp/x86_64/op_aput_object.S
deleted file mode 100644
index b1bae0f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Store an object into an array.  vBB[vCC] <- vAA.
- */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST ${opnum}
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpAputObject)         # (array, index)
-    testb   %al, %al
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_short.S b/runtime/interpreter/mterp/x86_64/op_aput_short.S
deleted file mode 100644
index 481fd68..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_wide.S b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
deleted file mode 100644
index 5bbd39b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTq", "store":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_array_length.S b/runtime/interpreter/mterp/x86_64/op_array_length.S
deleted file mode 100644
index e80d665..0000000
--- a/runtime/interpreter/mterp/x86_64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Return the length of an array.
- */
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
-    testl   %ecx, %ecx                      # is null?
-    je      common_errNullObject
-    andb    $$0xf, %al                      # eax <- A
-    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
-    SET_VREG rINST, %rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_check_cast.S b/runtime/interpreter/mterp/x86_64/op_check_cast.S
deleted file mode 100644
index f8fa7b2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_check_cast.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Check to see if a cast from one class to another is allowed.
- */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC
-    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
-    leaq    VREG_ADDRESS(rINSTq), OUT_ARG1
-    movq    OFF_FP_METHOD(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmp_long.S b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
deleted file mode 100644
index 23ca3e5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmp_long.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
-    /* cmp-long vAA, vBB, vCC */
-    movzbq  2(rPC), %rdx                    # edx <- BB
-    movzbq  3(rPC), %rcx                    # ecx <- CC
-    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
-    xorl    %eax, %eax
-    xorl    %edi, %edi
-    addb    $$1, %al
-    movl    $$-1, %esi
-    cmpq    VREG_ADDRESS(%rcx), %rdx
-    cmovl   %esi, %edi
-    cmovg   %eax, %edi
-    SET_VREG %edi, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
deleted file mode 100644
index 7c0aa1b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
deleted file mode 100644
index 14e8472..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
deleted file mode 100644
index 1d4c424..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
deleted file mode 100644
index 97a12a6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_const.S b/runtime/interpreter/mterp/x86_64/op_const.S
deleted file mode 100644
index 3cfafdb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const vAA, #+BBBBbbbb */
-    movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax, rINSTq                   # vAA<- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_16.S b/runtime/interpreter/mterp/x86_64/op_const_16.S
deleted file mode 100644
index 1a139c6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const/16 vAA, #+BBBB */
-    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_4.S b/runtime/interpreter/mterp/x86_64/op_const_4.S
deleted file mode 100644
index 23c4816..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* const/4 vA, #+B */
-    movsbl  rINSTbl, %eax                   # eax <-ssssssBx
-    movl    $$0xf, rINST
-    andl    %eax, rINST                     # rINST <- A
-    sarl    $$4, %eax
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_const_class.S b/runtime/interpreter/mterp/x86_64/op_const_class.S
deleted file mode 100644
index 0c402e1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_high16.S b/runtime/interpreter/mterp/x86_64/op_const_high16.S
deleted file mode 100644
index 64e633c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-    /* const/high16 vAA, #+BBBB0000 */
-    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
-    sall    $$16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax, rINSTq                   # vAA <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_handle.S b/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
deleted file mode 100644
index 2b8b0a2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_type.S b/runtime/interpreter/mterp/x86_64/op_const_method_type.S
deleted file mode 100644
index 33ce952..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string.S b/runtime/interpreter/mterp/x86_64/op_const_string.S
deleted file mode 100644
index 5a29bd3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
deleted file mode 100644
index ae03d20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,10 +0,0 @@
-    /* const/string vAA, String@BBBBBBBB */
-    EXPORT_PC
-    movl    2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- BBBB
-    movq    rINSTq, OUT_ARG1
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
-    testb   %al, %al
-    jnz     MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide.S b/runtime/interpreter/mterp/x86_64/op_const_wide.S
deleted file mode 100644
index 5615177..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
-    SET_WIDE_VREG %rax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
deleted file mode 100644
index 593b624..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const-wide/16 vAA, #+BBBB */
-    movswq  2(rPC), %rax                    # rax <- ssssBBBB
-    SET_WIDE_VREG %rax, rINSTq              # store
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
deleted file mode 100644
index 5ef3636..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
+++ /dev/null
@@ -1,4 +0,0 @@
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    movslq   2(rPC), %rax                   # eax <- ssssssssBBBBbbbb
-    SET_WIDE_VREG %rax, rINSTq              # store
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
deleted file mode 100644
index b86b4e5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    movzwq  2(rPC), %rax                    # eax <- 0000BBBB
-    salq    $$48, %rax                      # eax <- BBBB0000
-    SET_WIDE_VREG %rax, rINSTq              # v[AA+0] <- eax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double.S b/runtime/interpreter/mterp/x86_64/op_div_double.S
deleted file mode 100644
index 45c700c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
deleted file mode 100644
index 83f270e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float.S b/runtime/interpreter/mterp/x86_64/op_div_float.S
deleted file mode 100644
index aa90b24..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
deleted file mode 100644
index f0f8f1a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int.S b/runtime/interpreter/mterp/x86_64/op_div_int.S
deleted file mode 100644
index bba5a17..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
deleted file mode 100644
index fa4255d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
deleted file mode 100644
index 3fa1e09..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit16.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
deleted file mode 100644
index 859883e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit8.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long.S b/runtime/interpreter/mterp/x86_64/op_div_long.S
deleted file mode 100644
index a061a88..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
deleted file mode 100644
index 8886e68..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_float.S b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
deleted file mode 100644
index cea1482..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"d","dest_suffix":"s","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_int.S b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
deleted file mode 100644
index a9965ed..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_long.S b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
deleted file mode 100644
index 179e6a1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
deleted file mode 100644
index 7ea36a6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC
-    movslq  2(rPC), %rcx                    # rcx <- ssssssssBBBBbbbb
-    leaq    (rPC,%rcx,2), OUT_ARG1          # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
-    GET_VREG OUT_32_ARG0, rINSTq            # OUT_ARG0 <- vAA (array object)
-    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
deleted file mode 100644
index a7f7ddc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    movq    rSELF, OUT_ARG2
-    call    SYMBOL($helper)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
deleted file mode 100644
index 4ca79a3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_double.S b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
deleted file mode 100644
index 7855205..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"s","dest_suffix":"d","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_int.S b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
deleted file mode 100644
index cb90555..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_long.S b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
deleted file mode 100644
index 96bb4ee..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S
deleted file mode 100644
index 9749901..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto +AA */
-    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S
deleted file mode 100644
index 77688e0..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
-    /* goto/16 +AAAA */
-    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S
deleted file mode 100644
index 29d777b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto_32.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- *  Because we need the SF bit set, we'll use an adds
- * to convert from Dalvik offset to byte offset.
- */
-    /* goto/32 +AAAAAAAA */
-    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eq.S b/runtime/interpreter/mterp/x86_64/op_if_eq.S
deleted file mode 100644
index d56ce72..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eqz.S b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
deleted file mode 100644
index a0fc444..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ge.S b/runtime/interpreter/mterp/x86_64/op_if_ge.S
deleted file mode 100644
index a7832ef..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gez.S b/runtime/interpreter/mterp/x86_64/op_if_gez.S
deleted file mode 100644
index f9af5db..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gt.S b/runtime/interpreter/mterp/x86_64/op_if_gt.S
deleted file mode 100644
index 70f2b9e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gtz.S b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
deleted file mode 100644
index 2fb0d50..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_le.S b/runtime/interpreter/mterp/x86_64/op_if_le.S
deleted file mode 100644
index 321962a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lez.S b/runtime/interpreter/mterp/x86_64/op_if_lez.S
deleted file mode 100644
index d3dc334..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lt.S b/runtime/interpreter/mterp/x86_64/op_if_lt.S
deleted file mode 100644
index f028005..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ltz.S b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
deleted file mode 100644
index 383d73a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ne.S b/runtime/interpreter/mterp/x86_64/op_if_ne.S
deleted file mode 100644
index ac6e063..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_nez.S b/runtime/interpreter/mterp/x86_64/op_if_nez.S
deleted file mode 100644
index c96e4f3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
deleted file mode 100644
index 4ab7c27..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-%include "x86_64/field.S" { }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
deleted file mode 100644
index 18e9264..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
deleted file mode 100644
index 07139c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
deleted file mode 100644
index bec0ad5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
deleted file mode 100644
index 07139c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
deleted file mode 100644
index 5e22b88..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
deleted file mode 100644
index 8cb3be3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
deleted file mode 100644
index bcef1d2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
deleted file mode 100644
index 176c954..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    .extern artIGetObjectFromMterp
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG OUT_32_ARG0, %rcx              # vB (object we're operating on)
-    movzwl  2(rPC), OUT_32_ARG1             # eax <- field byte offset
-    EXPORT_PC
-    callq   SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
-    movq    rSELF, %rcx
-    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException                  # bail out
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
deleted file mode 100644
index bfb7530..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_quick.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "load":"movl", "wide":"0"}
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
-    /* op vA, vB, offset@CCCC */
-    movl    rINST, %ecx                     # rcx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    movzwq  2(rPC), %rax                    # eax <- field byte offset
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $$0xf,rINSTbl                   # rINST <- A
-    .if $wide
-    movq (%rcx,%rax,1), %rax
-    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
-    .else
-    ${load} (%rcx,%rax,1), %eax
-    SET_VREG %eax, rINSTq                   # fp[A] <- value
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
deleted file mode 100644
index 14c49f7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
deleted file mode 100644
index 56ca858..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
deleted file mode 100644
index a85a474..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU64" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
deleted file mode 100644
index 169d625..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movswl", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_instance_of.S b/runtime/interpreter/mterp/x86_64/op_instance_of.S
deleted file mode 100644
index 4819833..0000000
--- a/runtime/interpreter/mterp/x86_64/op_instance_of.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC
-    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- CCCC
-    movl    rINST, %eax                     # eax <- BA
-    sarl    $$4, %eax                       # eax <- B
-    leaq    VREG_ADDRESS(%rax), OUT_ARG1    # Get object address
-    movq    OFF_FP_METHOD(rFP), OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
-    movsbl  %al, %eax
-    movq    rSELF, %rcx
-    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
-    jnz     MterpException
-    andb    $$0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax, rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
deleted file mode 100644
index f4e578f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movsbl  %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_char.S b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
deleted file mode 100644
index c1bf17f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movzwl  %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_double.S b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
deleted file mode 100644
index 27ebf42..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_float.S b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
deleted file mode 100644
index 5a98d44..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sl","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_long.S b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
deleted file mode 100644
index 9281137..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* int to long vA, vB */
-    movzbq  rINSTbl, %rax                   # rax <- +A
-    sarl    $$4, %eax                       # eax <- B
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    movslq  VREG_ADDRESS(%rax), %rax
-    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_short.S b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
deleted file mode 100644
index 6ae6b50..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
deleted file mode 100644
index f4011f6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
deleted file mode 100644
index 94612c4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
deleted file mode 100644
index 9628589..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
deleted file mode 100644
index 09ac881..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
deleted file mode 100644
index 76d9cd4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeInterface" }
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
deleted file mode 100644
index 785b43c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
deleted file mode 100644
index 4529445..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 01981c1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static.S b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
deleted file mode 100644
index dd8027d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
deleted file mode 100644
index ee26074..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super.S b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
deleted file mode 100644
index d07f8d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeSuper" }
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
deleted file mode 100644
index 7245cfd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
deleted file mode 100644
index 19c708b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtual" }
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
deleted file mode 100644
index 313bd05..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
deleted file mode 100644
index 424ad32..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 556f718..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput.S b/runtime/interpreter/mterp/x86_64/op_iput.S
deleted file mode 100644
index dad5af6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
-%include "x86_64/field.S" { }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
deleted file mode 100644
index 06bbd70..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
deleted file mode 100644
index 6bd060e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte.S b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
deleted file mode 100644
index 53f9008..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
deleted file mode 100644
index 6bd060e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char.S b/runtime/interpreter/mterp/x86_64/op_iput_char.S
deleted file mode 100644
index 4736f5e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
deleted file mode 100644
index 3da96d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object.S b/runtime/interpreter/mterp/x86_64/op_iput_object.S
deleted file mode 100644
index 202e33f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "is_object":"1", "helper":"MterpIPutObj" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
deleted file mode 100644
index b5b128a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST ${opnum}
-    movl    rINST, OUT_32_ARG2
-    call    SYMBOL(MterpIputObjectQuick)
-    testb   %al, %al
-    jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
deleted file mode 100644
index ecaf98e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "reg":"rINST", "store":"movl" }
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    movzbq  rINSTbl, %rcx                   # rcx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
-    testl   %ecx, %ecx                      # is object null?
-    je      common_errNullObject
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST, rINSTq                  # rINST <- v[A]
-    movzwq  2(rPC), %rax                    # rax <- field byte offset
-    ${store}    ${reg}, (%rcx,%rax,1)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short.S b/runtime/interpreter/mterp/x86_64/op_iput_short.S
deleted file mode 100644
index dca5735..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
deleted file mode 100644
index 3da96d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide.S b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
deleted file mode 100644
index db52016..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutU64" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
deleted file mode 100644
index 473189d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    movzbq    rINSTbl, %rcx                 # rcx<- BA
-    sarl      $$4, %ecx                     # ecx<- B
-    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
-    testl     %ecx, %ecx                    # is object null?
-    je        common_errNullObject
-    movzwq    2(rPC), %rax                  # rax<- field byte offset
-    leaq      (%rcx,%rax,1), %rcx           # ecx<- Address of 64-bit target
-    andb      $$0xf, rINSTbl                # rINST<- A
-    GET_WIDE_VREG %rax, rINSTq              # rax<- fp[A]/fp[A+1]
-    movq      %rax, (%rcx)                  # obj.field<- r0/r1
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_double.S b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
deleted file mode 100644
index 7cdae32..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dq","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_float.S b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
deleted file mode 100644
index 7553348..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sq","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_int.S b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
deleted file mode 100644
index 7b50c8e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "x86_64/op_move.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
deleted file mode 100644
index 411091f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Synchronize on an object.
- */
-    /* monitor-enter vAA */
-    EXPORT_PC
-    GET_VREG OUT_32_ARG0, rINSTq
-    movq    rSELF, OUT_ARG1
-    call    SYMBOL(artLockObjectFromCode)   # (object, self)
-    testq   %rax, %rax
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
deleted file mode 100644
index 72d9a23..0000000
--- a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction.  See the Dalvik
- * instruction spec.
- */
-    /* monitor-exit vAA */
-    EXPORT_PC
-    GET_VREG OUT_32_ARG0, rINSTq
-    movq    rSELF, OUT_ARG1
-    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
-    testq   %rax, %rax
-    jnz     MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move.S b/runtime/interpreter/mterp/x86_64/op_move.S
deleted file mode 100644
index ccaac2c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    movl    rINST, %eax                     # eax <- BA
-    andb    $$0xf, %al                      # eax <- A
-    shrl    $$4, rINST                      # rINST <- B
-    GET_VREG %edx, rINSTq
-    .if $is_object
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_16.S b/runtime/interpreter/mterp/x86_64/op_move_16.S
deleted file mode 100644
index 6a813eb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    movzwq  4(rPC), %rcx                    # ecx <- BBBB
-    movzwq  2(rPC), %rax                    # eax <- AAAA
-    GET_VREG %edx, %rcx
-    .if $is_object
-    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_exception.S b/runtime/interpreter/mterp/x86_64/op_move_exception.S
deleted file mode 100644
index 33db878..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_exception.S
+++ /dev/null
@@ -1,6 +0,0 @@
-    /* move-exception vAA */
-    movq    rSELF, %rcx
-    movl    THREAD_EXCEPTION_OFFSET(%rcx), %eax
-    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
-    movl    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_from16.S b/runtime/interpreter/mterp/x86_64/op_move_from16.S
deleted file mode 100644
index 150e9c2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_from16.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    movzwq  2(rPC), %rax                    # eax <- BBBB
-    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
-    .if $is_object
-    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object.S b/runtime/interpreter/mterp/x86_64/op_move_object.S
deleted file mode 100644
index 0d86649..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_16.S b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
deleted file mode 100644
index 32541ff..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
deleted file mode 100644
index 983e4ab..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result.S b/runtime/interpreter/mterp/x86_64/op_move_result.S
deleted file mode 100644
index 8268344..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
-    movl    (%rax), %eax                    # r0 <- result.i.
-    .if $is_object
-    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
-    .else
-    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_object.S b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
deleted file mode 100644
index c5aac17..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
deleted file mode 100644
index 03de783..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
+++ /dev/null
@@ -1,5 +0,0 @@
-    /* move-result-wide vAA */
-    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
-    movq    (%rax), %rdx                         # Get wide
-    SET_WIDE_VREG %rdx, rINSTq                   # v[AA] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide.S b/runtime/interpreter/mterp/x86_64/op_move_wide.S
deleted file mode 100644
index 508f8cc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
-    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
deleted file mode 100644
index ce371a9..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwq  4(rPC), %rcx                    # ecx<- BBBB
-    movzwq  2(rPC), %rax                    # eax<- AAAA
-    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
-    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
deleted file mode 100644
index 0d6971a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
+++ /dev/null
@@ -1,6 +0,0 @@
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    movzwl  2(rPC), %ecx                    # ecx <- BBBB
-    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
-    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double.S b/runtime/interpreter/mterp/x86_64/op_mul_double.S
deleted file mode 100644
index 1f4bcb3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
deleted file mode 100644
index 9850a28..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float.S b/runtime/interpreter/mterp/x86_64/op_mul_float.S
deleted file mode 100644
index 85960e9..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
deleted file mode 100644
index 6d36b6a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int.S b/runtime/interpreter/mterp/x86_64/op_mul_int.S
deleted file mode 100644
index 5f3923a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"imull   (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
deleted file mode 100644
index 0b5af8a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* mul vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_VREG %eax, %rcx                     # eax <- vA
-    imull   (rFP,rINSTq,4), %eax
-    SET_VREG %eax, %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
deleted file mode 100644
index a4cfdbc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"imull   %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
deleted file mode 100644
index 89e9acb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"imull   %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long.S b/runtime/interpreter/mterp/x86_64/op_mul_long.S
deleted file mode 100644
index 2b85370..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"imulq   (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
deleted file mode 100644
index 167128b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
+++ /dev/null
@@ -1,8 +0,0 @@
-    /* mul vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    andb    $$0xf, %cl                      # ecx <- A
-    GET_WIDE_VREG %rax, %rcx                # rax <- vA
-    imulq   (rFP,rINSTq,4), %rax
-    SET_WIDE_VREG %rax, %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_double.S b/runtime/interpreter/mterp/x86_64/op_neg_double.S
deleted file mode 100644
index 2c14b09..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"preinstr":"    movq    $0x8000000000000000, %rsi", "instr":"    xorq    %rsi, %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_float.S b/runtime/interpreter/mterp/x86_64/op_neg_float.S
deleted file mode 100644
index 148b21e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"    xorl    $0x80000000, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_int.S b/runtime/interpreter/mterp/x86_64/op_neg_int.S
deleted file mode 100644
index f90a937..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"    negl    %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_long.S b/runtime/interpreter/mterp/x86_64/op_neg_long.S
deleted file mode 100644
index 18fc3cc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"    negq    %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_new_array.S b/runtime/interpreter/mterp/x86_64/op_new_array.S
deleted file mode 100644
index 9831a0b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rPC, OUT_ARG1
-    REFRESH_INST ${opnum}
-    movq    rINSTq, OUT_ARG2
-    movq    rSELF, OUT_ARG3
-    call    SYMBOL(MterpNewArray)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_new_instance.S b/runtime/interpreter/mterp/x86_64/op_new_instance.S
deleted file mode 100644
index fc8c8cd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Create a new instance of a class.
- */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC
-    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
-    movq    rSELF, OUT_ARG1
-    REFRESH_INST ${opnum}
-    movq    rINSTq, OUT_ARG2
-    call    SYMBOL(MterpNewInstance)
-    testb   %al, %al                        # 0 means an exception is thrown
-    jz      MterpPossibleException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_nop.S b/runtime/interpreter/mterp/x86_64/op_nop.S
deleted file mode 100644
index 4cb68e3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_nop.S
+++ /dev/null
@@ -1 +0,0 @@
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_not_int.S b/runtime/interpreter/mterp/x86_64/op_not_int.S
deleted file mode 100644
index 463d080..0000000
--- a/runtime/interpreter/mterp/x86_64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"    notl    %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_not_long.S b/runtime/interpreter/mterp/x86_64/op_not_long.S
deleted file mode 100644
index c97bb9e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"    notq    %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int.S b/runtime/interpreter/mterp/x86_64/op_or_int.S
deleted file mode 100644
index 730310f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"orl     (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
deleted file mode 100644
index f722e4d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"orl     %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
deleted file mode 100644
index fee86c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
deleted file mode 100644
index 81104c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long.S b/runtime/interpreter/mterp/x86_64/op_or_long.S
deleted file mode 100644
index 6c70a20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"orq     (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
deleted file mode 100644
index 546da1d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"orq     %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
deleted file mode 100644
index 148552f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_packed_switch.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-/*
- * Handle a packed-switch or sparse-switch instruction.  In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
-    /* op vAA, +BBBB */
-    movslq  2(rPC), OUT_ARG0                # rcx <- ssssssssBBBBbbbb
-    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + ssssssssBBBBbbbb*2
-    GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
-    call    SYMBOL($func)
-    testl   %eax, %eax
-    movslq  %eax, rINSTq
-    jmp     MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double.S b/runtime/interpreter/mterp/x86_64/op_rem_double.S
deleted file mode 100644
index 00aed78..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_double.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* rem_double vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx <- BB
-    movzbq  2(rPC), %rax                    # eax <- CC
-    fldl    VREG_ADDRESS(%rcx)              # %st1 <- fp[vBB]
-    fldl    VREG_ADDRESS(%rax)              # %st0 <- fp[vCC]
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(rINSTq)            # fp[vAA] <- %st
-    CLEAR_WIDE_REF rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
deleted file mode 100644
index 9768266..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* rem_double/2addr vA, vB */
-    movzbq  rINSTbl, %rcx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    fldl    VREG_ADDRESS(rINSTq)            # vB to fp stack
-    andb    $$0xf, %cl                      # ecx <- A
-    fldl    VREG_ADDRESS(%rcx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstpl   VREG_ADDRESS(%rcx)              # %st to vA
-    CLEAR_WIDE_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float.S b/runtime/interpreter/mterp/x86_64/op_rem_float.S
deleted file mode 100644
index 5af28ac..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_float.S
+++ /dev/null
@@ -1,14 +0,0 @@
-    /* rem_float vAA, vBB, vCC */
-    movzbq  3(rPC), %rcx                    # ecx <- BB
-    movzbq  2(rPC), %rax                    # eax <- CC
-    flds    VREG_ADDRESS(%rcx)              # vBB to fp stack
-    flds    VREG_ADDRESS(%rax)              # vCC to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(rINSTq)            # %st to vAA
-    CLEAR_REF rINSTq
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
deleted file mode 100644
index e9282a8..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-    /* rem_float/2addr vA, vB */
-    movzbq  rINSTbl, %rcx                   # ecx <- A+
-    sarl    $$4, rINST                      # rINST <- B
-    flds    VREG_ADDRESS(rINSTq)            # vB to fp stack
-    andb    $$0xf, %cl                      # ecx <- A
-    flds    VREG_ADDRESS(%rcx)              # vA to fp stack
-1:
-    fprem
-    fstsw   %ax
-    sahf
-    jp      1b
-    fstp    %st(1)
-    fstps   VREG_ADDRESS(%rcx)              # %st to vA
-    CLEAR_REF %rcx
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int.S b/runtime/interpreter/mterp/x86_64/op_rem_int.S
deleted file mode 100644
index fd77d7c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
deleted file mode 100644
index 25ffbf7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
deleted file mode 100644
index 21cc370..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit16.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
deleted file mode 100644
index 2eb0150..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit8.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long.S b/runtime/interpreter/mterp/x86_64/op_rem_long.S
deleted file mode 100644
index efa7215..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","ext":"cqo","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
deleted file mode 100644
index ce0dd86..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","rem":"1","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
deleted file mode 100644
index 8cb6cba..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_object.S b/runtime/interpreter/mterp/x86_64/op_return_object.S
deleted file mode 100644
index 1ae69a5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_return.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
deleted file mode 100644
index ba68e7e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_void.S
+++ /dev/null
@@ -1,9 +0,0 @@
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorq    %rax, %rax
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
deleted file mode 100644
index 6799da1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,7 +0,0 @@
-    movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    xorq    %rax, %rax
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
deleted file mode 100644
index d6d6d1b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Return a 64-bit value.
- */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    call    SYMBOL(MterpThreadFenceForConstructor)
-    movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
-    jz      1f
-    call    SYMBOL(MterpSuspendCheck)
-1:
-    GET_WIDE_VREG %rax, rINSTq              # eax <- v[AA]
-    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int.S b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
deleted file mode 100644
index 2dd2002..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "x86_64/binopLit16.S" {"instr":"subl    %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
deleted file mode 100644
index 64d0d8a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"subl    %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
deleted file mode 100644
index 21e8e64..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-%include "x86_64/field.S" { }
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
deleted file mode 100644
index e5a4e41..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
deleted file mode 100644
index 4602f7d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
deleted file mode 100644
index a094a54..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
deleted file mode 100644
index 94597b1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
deleted file mode 100644
index dee5c24..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
deleted file mode 100644
index c53c077..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU64"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int.S b/runtime/interpreter/mterp/x86_64/op_shl_int.S
deleted file mode 100644
index fa1edb7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
deleted file mode 100644
index dd96279..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
deleted file mode 100644
index 39b23ae..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long.S b/runtime/interpreter/mterp/x86_64/op_shl_long.S
deleted file mode 100644
index fdc7cb6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"salq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
deleted file mode 100644
index 546633f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"salq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int.S b/runtime/interpreter/mterp/x86_64/op_shr_int.S
deleted file mode 100644
index fc289f4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
deleted file mode 100644
index 0e5bca7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
deleted file mode 100644
index 3cc9307..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long.S b/runtime/interpreter/mterp/x86_64/op_shr_long.S
deleted file mode 100644
index 25028d3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sarq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
deleted file mode 100644
index 3738413..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sarq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
deleted file mode 100644
index 0eaa514..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
deleted file mode 100644
index 7dd2498..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSPutU32"}
-%include "x86_64/field.S" { }
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
deleted file mode 100644
index ea9acbf..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
deleted file mode 100644
index 62c9e20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
deleted file mode 100644
index ab0196e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_object.S b/runtime/interpreter/mterp/x86_64/op_sput_object.S
deleted file mode 100644
index c2bd07b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"is_object":"1", "helper":"MterpSPutObj"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
deleted file mode 100644
index f73a3fc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
deleted file mode 100644
index 7e77072..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutU64"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double.S b/runtime/interpreter/mterp/x86_64/op_sub_double.S
deleted file mode 100644
index 952667e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
deleted file mode 100644
index 0bd5dbb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float.S b/runtime/interpreter/mterp/x86_64/op_sub_float.S
deleted file mode 100644
index ea0ae14..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
deleted file mode 100644
index 9dd1780..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int.S b/runtime/interpreter/mterp/x86_64/op_sub_int.S
deleted file mode 100644
index 560394f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"subl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
deleted file mode 100644
index 6f50f78..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"subl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long.S b/runtime/interpreter/mterp/x86_64/op_sub_long.S
deleted file mode 100644
index 7fa54e7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"subq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
deleted file mode 100644
index c18be10..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"subq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_throw.S b/runtime/interpreter/mterp/x86_64/op_throw.S
deleted file mode 100644
index 8095c25..0000000
--- a/runtime/interpreter/mterp/x86_64/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Throw an exception object in the current thread.
- */
-    /* throw vAA */
-    EXPORT_PC
-    GET_VREG %eax, rINSTq                   # eax<- vAA (exception object)
-    testb   %al, %al
-    jz      common_errNullObject
-    movq    rSELF, %rcx
-    movq    %rax, THREAD_EXCEPTION_OFFSET(%rcx)
-    jmp     MterpException
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3e.S b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3f.S b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_40.S b/runtime/interpreter/mterp/x86_64/op_unused_40.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_41.S b/runtime/interpreter/mterp/x86_64/op_unused_41.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_42.S b/runtime/interpreter/mterp/x86_64/op_unused_42.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_43.S b/runtime/interpreter/mterp/x86_64/op_unused_43.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_79.S b/runtime/interpreter/mterp/x86_64/op_unused_79.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_7a.S b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f3.S b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f4.S b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f5.S b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f6.S b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f7.S b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f8.S b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f9.S b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fc.S b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fd.S b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int.S b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
deleted file mode 100644
index dd91086..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
deleted file mode 100644
index d38aedd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
deleted file mode 100644
index f7ff8ab..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long.S b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
deleted file mode 100644
index 7c6daca..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"shrq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
deleted file mode 100644
index cd6a22c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"shrq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int.S b/runtime/interpreter/mterp/x86_64/op_xor_int.S
deleted file mode 100644
index b295d74..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"xorl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
deleted file mode 100644
index 879bfc0..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"xorl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
deleted file mode 100644
index 5d375a1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
deleted file mode 100644
index 54cce9c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long.S b/runtime/interpreter/mterp/x86_64/op_xor_long.S
deleted file mode 100644
index 52b44e2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"xorq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
deleted file mode 100644
index d75c4ba..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"xorq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/other.S b/runtime/interpreter/mterp/x86_64/other.S
new file mode 100644
index 0000000..412389f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/other.S
@@ -0,0 +1,297 @@
+%def const(helper="UndefinedConstHandler"):
+    /* const/class vAA, type@BBBB */
+    /* const/method-handle vAA, method_handle@BBBB */
+    /* const/method-type vAA, proto@BBBB */
+    /* const/string vAA, string@BBBB */
+    .extern $helper
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL($helper)                 # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # grab all 32 bits at once
+    SET_VREG %eax, rINSTq                   # vAA<- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
+    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    movsbl  rINSTbl, %eax                   # eax <-ssssssBx
+    movl    $$0xf, rINST
+    andl    %eax, rINST                     # rINST <- A
+    sarl    $$4, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_const_class():
+%  const(helper="MterpConstClass")
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    $$16, %eax                      # eax <- BBBB0000
+    SET_VREG %eax, rINSTq                   # vAA <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_method_handle():
+%  const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+%  const(helper="MterpConstMethodType")
+
+%def op_const_string():
+%  const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+    /* const/string vAA, String@BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    movswq  2(rPC), %rax                    # rax <- ssssBBBB
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    movslq   2(rPC), %rax                   # eax <- ssssssssBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    movzwq  2(rPC), %rax                    # rax <- 000000000000BBBB
+    salq    $$48, %rax                      # rax <- BBBB000000000000
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_monitor_enter():
+/*
+ * Synchronize on an object.
+ */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC 1
+    movq    rSELF, %rax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+    jz      MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_monitor_exit():
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction.  See the Dalvik
+ * instruction spec.
+ */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC 1
+    movq    rSELF, %rax
+    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+    jz      MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $$0xf, %al                      # eax <- A
+    shrl    $$4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwq  4(rPC), %rcx                    # ecx <- BBBB
+    movzwq  2(rPC), %rax                    # eax <- AAAA
+    GET_VREG %edx, %rcx
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_exception():
+    /* move-exception vAA */
+    movq    rSELF, %rcx
+    movl    THREAD_EXCEPTION_OFFSET(%rcx), %eax
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
+    movl    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzwq  2(rPC), %rax                    # eax <- BBBB
+    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
+    .if $is_object
+    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movl    (%rax), %eax                    # r0 <- result.i.
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* move-result-wide vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movq    (%rax), %rdx                         # Get wide
+    SET_WIDE_VREG %rdx, rINSTq                   # v[AA] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwq  4(rPC), %rcx                    # ecx<- BBBB
+    movzwq  2(rPC), %rax                    # eax<- AAAA
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  2(rPC), %ecx                    # ecx <- BBBB
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_nop():
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/mterp/x86_64/shop2addr.S b/runtime/interpreter/mterp/x86_64/shop2addr.S
deleted file mode 100644
index 6b06d00..0000000
--- a/runtime/interpreter/mterp/x86_64/shop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"0"}
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
-    /* shift/2addr vA, vB */
-    movl    rINST, %ecx                     # ecx <- BA
-    sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx, %rcx                     # ecx <- vBB
-    andb    $$0xf, rINSTbl                  # rINST <- A
-    .if $wide
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
-    $instr                                  # ex: sarl %cl, %eax
-    SET_WIDE_VREG %rax, rINSTq
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vAA
-    $instr                                  # ex: sarl %cl, %eax
-    SET_VREG %eax, rINSTq
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop.S b/runtime/interpreter/mterp/x86_64/sseBinop.S
deleted file mode 100644
index 09d3364..0000000
--- a/runtime/interpreter/mterp/x86_64/sseBinop.S
+++ /dev/null
@@ -1,9 +0,0 @@
-%default {"instr":"","suff":""}
-    movzbq  2(rPC), %rcx                    # ecx <- BB
-    movzbq  3(rPC), %rax                    # eax <- CC
-    movs${suff}   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
-    ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
-    movs${suff}   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
-    pxor    %xmm0, %xmm0
-    movs${suff}   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
deleted file mode 100644
index 084166b..0000000
--- a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
-%default {"instr":"","suff":""}
-    movl    rINST, %ecx                     # ecx <- A+
-    andl    $$0xf, %ecx                     # ecx <- A
-    movs${suff} VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
-    sarl    $$4, rINST                      # rINST<- B
-    ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
-    movs${suff} %xmm0, VREG_ADDRESS(%rcx)   # vAA<- %xmm0
-    pxor    %xmm0, %xmm0
-    movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unop.S b/runtime/interpreter/mterp/x86_64/unop.S
deleted file mode 100644
index 1777123..0000000
--- a/runtime/interpreter/mterp/x86_64/unop.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":"", "instr":"", "wide":"0"}
-/*
- * Generic 32/64-bit unary operation.  Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
-    /* unop vA, vB */
-    movl    rINST, %ecx                     # rcx <- A+
-    sarl    $$4,rINST                       # rINST <- B
-    .if ${wide}
-    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
-    .else
-    GET_VREG %eax, rINSTq                   # eax <- vB
-    .endif
-    andb    $$0xf,%cl                       # ecx <- A
-$preinstr
-$instr
-    .if ${wide}
-    SET_WIDE_VREG %rax, %rcx
-    .else
-    SET_VREG %eax, %rcx
-    .endif
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unused.S b/runtime/interpreter/mterp/x86_64/unused.S
deleted file mode 100644
index c95ef94..0000000
--- a/runtime/interpreter/mterp/x86_64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
-    jmp     MterpFallback
diff --git a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S
deleted file mode 100644
index fb8ae6a..0000000
--- a/runtime/interpreter/mterp/x86_64/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
-    /* if-cmp vAA, +BBBB */
-    cmpl    $$0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    j${revcmp}   1f
-    movswq  2(rPC), rINSTq                  # fetch signed displacement
-    testq   rINSTq, rINSTq
-    jmp     MterpCommonTakenBranch
-1:
-    cmpl    $$JIT_CHECK_OSR, rPROFILE
-    je      .L_check_not_taken_osr
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/safe_math.h b/runtime/interpreter/safe_math.h
index 78b3539..06f046a 100644
--- a/runtime/interpreter/safe_math.h
+++ b/runtime/interpreter/safe_math.h
@@ -41,19 +41,19 @@
   return static_cast<biggest_T>(Op<unsigned_biggest_T>()(val1, val2));
 }
 
-// Perform signed a signed add on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed add on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
 static inline typename select_bigger<T1, T2>::type SafeAdd(T1 a, T2 b) {
   return SafeMath<std::plus>(a, b);
 }
 
-// Perform signed a signed substract on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed subtract on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
 static inline typename select_bigger<T1, T2>::type SafeSub(T1 a, T2 b) {
   return SafeMath<std::minus>(a, b);
 }
 
-// Perform signed a signed multiply on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed multiply on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
 static inline typename select_bigger<T1, T2>::type SafeMul(T1 a, T2 b) {
   return SafeMath<std::multiplies>(a, b);
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 0e4cf27..3f6b729 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -21,9 +21,8 @@
 #include <cstring>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
-#include "dex/dex_file.h"
 #include "lock_count_data.h"
 #include "read_barrier.h"
 #include "stack_reference.h"
@@ -49,6 +48,17 @@
 //  - interpreter - separate VRegs and reference arrays. References are in the reference array.
 //  - JNI - just VRegs, but where every VReg holds a reference.
 class ShadowFrame {
+ private:
+  // Used to keep track of extra state the shadowframe has.
+  enum class FrameFlags : uint32_t {
+    // We have been requested to notify when this frame gets popped.
+    kNotifyFramePop = 1 << 0,
+    // We have been asked to pop this frame off the stack as soon as possible.
+    kForcePopFrame  = 1 << 1,
+    // We have been asked to re-execute the last instruction.
+    kForceRetryInst = 1 << 2,
+  };
+
  public:
   // Compute size of ShadowFrame in bytes assuming it has a reference array.
   static size_t ComputeSize(uint32_t num_vregs) {
@@ -179,12 +189,8 @@
   mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     mirror::Object* ref;
-    if (HasReferenceArray()) {
-      ref = References()[i].AsMirrorPtr();
-    } else {
-      const uint32_t* vreg_ptr = &vregs_[i];
-      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
-    }
+    DCHECK(HasReferenceArray());
+    ref = References()[i].AsMirrorPtr();
     ReadBarrier::MaybeAssertToSpaceInvariant(ref);
     if (kVerifyFlags & kVerifyReads) {
       VerifyObject(ref);
@@ -279,47 +285,47 @@
     return lock_count_data_;
   }
 
-  static size_t LockCountDataOffset() {
+  static constexpr size_t LockCountDataOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
   }
 
-  static size_t LinkOffset() {
+  static constexpr size_t LinkOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, link_);
   }
 
-  static size_t MethodOffset() {
+  static constexpr size_t MethodOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, method_);
   }
 
-  static size_t DexPCOffset() {
+  static constexpr size_t DexPCOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
   }
 
-  static size_t NumberOfVRegsOffset() {
+  static constexpr size_t NumberOfVRegsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
   }
 
-  static size_t VRegsOffset() {
+  static constexpr size_t VRegsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, vregs_);
   }
 
-  static size_t ResultRegisterOffset() {
+  static constexpr size_t ResultRegisterOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, result_register_);
   }
 
-  static size_t DexPCPtrOffset() {
+  static constexpr size_t DexPCPtrOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
   }
 
-  static size_t DexInstructionsOffset() {
+  static constexpr size_t DexInstructionsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
   }
 
-  static size_t CachedHotnessCountdownOffset() {
+  static constexpr size_t CachedHotnessCountdownOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
   }
 
-  static size_t HotnessCountdownOffset() {
+  static constexpr size_t HotnessCountdownOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
   }
 
@@ -345,11 +351,38 @@
   }
 
   bool NeedsNotifyPop() const {
-    return needs_notify_pop_;
+    return GetFrameFlag(FrameFlags::kNotifyFramePop);
   }
 
   void SetNotifyPop(bool notify) {
-    needs_notify_pop_ = notify;
+    UpdateFrameFlag(notify, FrameFlags::kNotifyFramePop);
+  }
+
+  bool GetForcePopFrame() const {
+    return GetFrameFlag(FrameFlags::kForcePopFrame);
+  }
+
+  void SetForcePopFrame(bool enable) {
+    UpdateFrameFlag(enable, FrameFlags::kForcePopFrame);
+  }
+
+  bool GetForceRetryInstruction() const {
+    return GetFrameFlag(FrameFlags::kForceRetryInst);
+  }
+
+  void SetForceRetryInstruction(bool enable) {
+    UpdateFrameFlag(enable, FrameFlags::kForceRetryInst);
+  }
+
+  void CheckConsistentVRegs() const {
+    if (kIsDebugBuild) {
+      // A shadow frame visible to GC requires the following rule: for a given vreg,
+      // its vreg reference equivalent should be the same, or null.
+      for (uint32_t i = 0; i < NumberOfVRegs(); ++i) {
+        int32_t reference_value = References()[i].AsVRegValue();
+        CHECK((GetVReg(i) == reference_value) || (reference_value == 0));
+      }
+    }
   }
 
  private:
@@ -364,7 +397,7 @@
         dex_pc_(dex_pc),
         cached_hotness_countdown_(0),
         hotness_countdown_(0),
-        needs_notify_pop_(0) {
+        frame_flags_(0) {
     // TODO(iam): Remove this parameter, it's an an artifact of portable removal
     DCHECK(has_reference_array);
     if (has_reference_array) {
@@ -374,6 +407,18 @@
     }
   }
 
+  void UpdateFrameFlag(bool enable, FrameFlags flag) {
+    if (enable) {
+      frame_flags_ |= static_cast<uint32_t>(flag);
+    } else {
+      frame_flags_ &= ~static_cast<uint32_t>(flag);
+    }
+  }
+
+  bool GetFrameFlag(FrameFlags flag) const {
+    return (frame_flags_ & static_cast<uint32_t>(flag)) != 0;
+  }
+
   const StackReference<mirror::Object>* References() const {
     DCHECK(HasReferenceArray());
     const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
@@ -397,9 +442,11 @@
   uint32_t dex_pc_;
   int16_t cached_hotness_countdown_;
   int16_t hotness_countdown_;
-  // TODO Might be worth it to try to bit-pack this into some other field to reduce stack usage.
-  // NB alignment requires that this field takes 4 bytes. Only 1 bit is actually ever used.
-  bool needs_notify_pop_;
+
+  // This is a set of ShadowFrame::FrameFlags which denote special states this frame is in.
+  // NB alignment requires that this field takes 4 bytes no matter its size. Only 3 bits are
+  // currently used.
+  uint32_t frame_flags_;
 
   // This is a two-part array:
   //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index d4b51af..7a1b7eb 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -44,12 +44,15 @@
 #include "hidden_api.h"
 #include "interpreter/interpreter_common.h"
 #include "jvalue-inl.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/array-inl.h"
-#include "mirror/class.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/field-inl.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "nth_caller_visitor.h"
@@ -178,16 +181,21 @@
   return param->AsString();
 }
 
+static std::function<hiddenapi::AccessContext()> GetHiddenapiAccessContextFunction(
+    ShadowFrame* frame) {
+  return [=]() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return hiddenapi::AccessContext(frame->GetMethod()->GetDeclaringClass());
+  };
+}
+
 template<typename T>
-static ALWAYS_INLINE bool ShouldBlockAccessToMember(T* member, ShadowFrame* frame)
+static ALWAYS_INLINE bool ShouldDenyAccessToMember(T* member, ShadowFrame* frame)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // All uses in this file are from reflection
-  constexpr hiddenapi::AccessMethod access_method = hiddenapi::kReflection;
-  return hiddenapi::GetMemberAction(
-      member,
-      frame->GetMethod()->GetDeclaringClass()->GetClassLoader(),
-      frame->GetMethod()->GetDeclaringClass()->GetDexCache(),
-      access_method) == hiddenapi::kDeny;
+  constexpr hiddenapi::AccessMethod kAccessMethod = hiddenapi::AccessMethod::kReflection;
+  return hiddenapi::ShouldDenyAccessToMember(member,
+                                             GetHiddenapiAccessContextFunction(frame),
+                                             kAccessMethod);
 }
 
 void UnstartedRuntime::UnstartedClassForNameCommon(Thread* self,
@@ -294,7 +302,7 @@
   auto* cl = Runtime::Current()->GetClassLinker();
   if (cl->EnsureInitialized(self, h_klass, true, true)) {
     ArtMethod* cons = h_klass->FindConstructor("()V", cl->GetImagePointerSize());
-    if (cons != nullptr && ShouldBlockAccessToMember(cons, shadow_frame)) {
+    if (cons != nullptr && ShouldDenyAccessToMember(cons, shadow_frame)) {
       cons = nullptr;
     }
     if (cons != nullptr) {
@@ -339,7 +347,7 @@
       }
     }
   }
-  if (found != nullptr && ShouldBlockAccessToMember(found, shadow_frame)) {
+  if (found != nullptr && ShouldDenyAccessToMember(found, shadow_frame)) {
     found = nullptr;
   }
   if (found == nullptr) {
@@ -386,25 +394,26 @@
   Runtime* runtime = Runtime::Current();
   bool transaction = runtime->IsActiveTransaction();
   PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+  auto fn_hiddenapi_access_context = GetHiddenapiAccessContextFunction(shadow_frame);
   ObjPtr<mirror::Method> method;
   if (transaction) {
     if (pointer_size == PointerSize::k64) {
       method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
-          self, klass, name, args);
+          self, klass, name, args, fn_hiddenapi_access_context);
     } else {
       method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
-          self, klass, name, args);
+          self, klass, name, args, fn_hiddenapi_access_context);
     }
   } else {
     if (pointer_size == PointerSize::k64) {
       method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
-          self, klass, name, args);
+          self, klass, name, args, fn_hiddenapi_access_context);
     } else {
       method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
-          self, klass, name, args);
+          self, klass, name, args, fn_hiddenapi_access_context);
     }
   }
-  if (method != nullptr && ShouldBlockAccessToMember(method->GetArtMethod(), shadow_frame)) {
+  if (method != nullptr && ShouldDenyAccessToMember(method->GetArtMethod(), shadow_frame)) {
     method = nullptr;
   }
   result->SetL(method);
@@ -442,7 +451,7 @@
     }
   }
   if (constructor != nullptr &&
-      ShouldBlockAccessToMember(constructor->GetArtMethod(), shadow_frame)) {
+      ShouldDenyAccessToMember(constructor->GetArtMethod(), shadow_frame)) {
     constructor = nullptr;
   }
   result->SetL(constructor);
@@ -567,12 +576,9 @@
 
   Runtime* runtime = Runtime::Current();
 
-  std::vector<std::string> split;
-  Split(runtime->GetBootClassPathString(), ':', &split);
-  if (split.empty()) {
-    AbortTransactionOrFail(self,
-                           "Boot classpath not set or split error:: %s",
-                           runtime->GetBootClassPathString().c_str());
+  const std::vector<std::string>& boot_class_path = Runtime::Current()->GetBootClassPath();
+  if (boot_class_path.empty()) {
+    AbortTransactionOrFail(self, "Boot classpath not set");
     return;
   }
 
@@ -580,7 +586,7 @@
   size_t map_size;
   std::string last_error_msg;  // Only store the last message (we could concatenate).
 
-  for (const std::string& jar_file : split) {
+  for (const std::string& jar_file : boot_class_path) {
     mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
     if (mem_map.IsValid()) {
       break;
@@ -638,7 +644,7 @@
   }
 
   uint32_t args[1];
-  args[0] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_array.Get()));
+  args[0] = reinterpret_cast32<uint32_t>(h_array.Get());
   EnterInterpreterFromInvoke(self, constructor, h_obj.Get(), args, nullptr);
 
   if (self->IsExceptionPending()) {
@@ -865,10 +871,10 @@
       // checking version, however, does.
       if (Runtime::Current()->IsActiveTransaction()) {
         dst->AssignableCheckingMemcpy<true>(
-            dst_pos, src, src_pos, length, true /* throw_exception */);
+            dst_pos, src, src_pos, length, /* throw_exception= */ true);
       } else {
         dst->AssignableCheckingMemcpy<false>(
-                    dst_pos, src, src_pos, length, true /* throw_exception */);
+            dst_pos, src, src_pos, length, /* throw_exception= */ true);
       }
     }
   } else if (src_type->IsPrimitiveByte()) {
@@ -1180,19 +1186,19 @@
     }
 
     case Primitive::kPrimShort: {
-      typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+      using unaligned_short __attribute__((__aligned__(1))) = int16_t;
       result->SetS(*reinterpret_cast<unaligned_short*>(static_cast<intptr_t>(address)));
       return;
     }
 
     case Primitive::kPrimInt: {
-      typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+      using unaligned_int __attribute__((__aligned__(1))) = int32_t;
       result->SetI(*reinterpret_cast<unaligned_int*>(static_cast<intptr_t>(address)));
       return;
     }
 
     case Primitive::kPrimLong: {
-      typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+      using unaligned_long __attribute__((__aligned__(1))) = int64_t;
       result->SetJ(*reinterpret_cast<unaligned_long*>(static_cast<intptr_t>(address)));
       return;
     }
@@ -1478,9 +1484,9 @@
             reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
     ReadBarrier::Barrier<
         mirror::Object,
-        /* kIsVolatile */ false,
+        /* kIsVolatile= */ false,
         kWithReadBarrier,
-        /* kAlwaysUpdateField */ true>(
+        /* kAlwaysUpdateField= */ true>(
         obj,
         MemberOffset(offset),
         field_addr);
@@ -1691,14 +1697,21 @@
 }
 
 void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args, JValue* result) {
+    Thread* self,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args,
+    JValue* result) {
   int32_t length = args[1];
   DCHECK_GE(length, 0);
-  ObjPtr<mirror::Class> element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+  ObjPtr<mirror::Object> element_class = reinterpret_cast32<mirror::Object*>(args[0])->AsClass();
+  if (element_class == nullptr) {
+    AbortTransactionOrFail(self, "VMRuntime.newUnpaddedArray with null element_class.");
+    return;
+  }
   Runtime* runtime = Runtime::Current();
   ObjPtr<mirror::Class> array_class =
-      runtime->GetClassLinker()->FindArrayClass(self, element_class);
+      runtime->GetClassLinker()->FindArrayClass(self, element_class->AsClass());
   DCHECK(array_class != nullptr);
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
   result->SetL(mirror::Array::Alloc<true, true>(self,
@@ -1789,14 +1802,17 @@
   receiver->NotifyAll(self);
 }
 
-void UnstartedRuntime::UnstartedJNIStringCompareTo(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, uint32_t* args,
-    JValue* result) {
-  mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString();
+void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
+                                                   ArtMethod* method ATTRIBUTE_UNUSED,
+                                                   mirror::Object* receiver,
+                                                   uint32_t* args,
+                                                   JValue* result) {
+  ObjPtr<mirror::Object> rhs = reinterpret_cast32<mirror::Object*>(args[0]);
   if (rhs == nullptr) {
-    AbortTransactionOrFail(self, "String.compareTo with null object");
+    AbortTransactionOrFail(self, "String.compareTo with null object.");
+    return;
   }
-  result->SetI(receiver->AsString()->CompareTo(rhs));
+  result->SetI(receiver->AsString()->CompareTo(rhs->AsString()));
 }
 
 void UnstartedRuntime::UnstartedJNIStringIntern(
@@ -1854,9 +1870,16 @@
 }
 
 void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
-  mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+    Thread* self,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args,
+    JValue* result) {
+  ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Unsafe.compareAndSwapInt with null object.");
+    return;
+  }
   jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
   jint expectedValue = args[3];
   jint newValue = args[4];
@@ -1877,12 +1900,14 @@
   result->SetZ(success ? JNI_TRUE : JNI_FALSE);
 }
 
-void UnstartedRuntime::UnstartedJNIUnsafeGetIntVolatile(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args, JValue* result) {
-  mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+void UnstartedRuntime::UnstartedJNIUnsafeGetIntVolatile(Thread* self,
+                                                        ArtMethod* method ATTRIBUTE_UNUSED,
+                                                        mirror::Object* receiver ATTRIBUTE_UNUSED,
+                                                        uint32_t* args,
+                                                        JValue* result) {
+  ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
   if (obj == nullptr) {
-    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    AbortTransactionOrFail(self, "Unsafe.getIntVolatile with null object.");
     return;
   }
 
@@ -1890,12 +1915,18 @@
   result->SetI(obj->GetField32Volatile(MemberOffset(offset)));
 }
 
-void UnstartedRuntime::UnstartedJNIUnsafePutObject(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result ATTRIBUTE_UNUSED) {
-  mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self,
+                                                   ArtMethod* method ATTRIBUTE_UNUSED,
+                                                   mirror::Object* receiver ATTRIBUTE_UNUSED,
+                                                   uint32_t* args,
+                                                   JValue* result ATTRIBUTE_UNUSED) {
+  ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Unsafe.putObject with null object.");
+    return;
+  }
   jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
-  mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]);
+  ObjPtr<mirror::Object> newValue = reinterpret_cast32<mirror::Object*>(args[3]);
   if (Runtime::Current()->IsActiveTransaction()) {
     obj->SetFieldObject<true>(MemberOffset(offset), newValue);
   } else {
@@ -1904,26 +1935,45 @@
 }
 
 void UnstartedRuntime::UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
-  ObjPtr<mirror::Class> component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
-  Primitive::Type primitive_type = component->GetPrimitiveType();
+    Thread* self,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args,
+    JValue* result) {
+  ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
+  if (component == nullptr) {
+    AbortTransactionOrFail(self, "Unsafe.getArrayBaseOffsetForComponentType with null component.");
+    return;
+  }
+  Primitive::Type primitive_type = component->AsClass()->GetPrimitiveType();
   result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
 }
 
 void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
-  ObjPtr<mirror::Class> component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
-  Primitive::Type primitive_type = component->GetPrimitiveType();
+    Thread* self,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args,
+    JValue* result) {
+  ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
+  if (component == nullptr) {
+    AbortTransactionOrFail(self, "Unsafe.getArrayIndexScaleForComponentType with null component.");
+    return;
+  }
+  Primitive::Type primitive_type = component->AsClass()->GetPrimitiveType();
   result->SetI(Primitive::ComponentSize(primitive_type));
 }
 
-typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result,
-    size_t arg_size);
+using InvokeHandler = void(*)(Thread* self,
+                              ShadowFrame* shadow_frame,
+                              JValue* result,
+                              size_t arg_size);
 
-typedef void (*JNIHandler)(Thread* self, ArtMethod* method, mirror::Object* receiver,
-    uint32_t* args, JValue* result);
+using JNIHandler = void(*)(Thread* self,
+                           ArtMethod* method,
+                           mirror::Object* receiver,
+                           uint32_t* args,
+                           JValue* result);
 
 static bool tables_initialized_ = false;
 static std::unordered_map<std::string, InvokeHandler> invoke_handlers_;
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 200fc5b..a8ee23a 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -30,8 +30,11 @@
 #include "handle.h"
 #include "handle_scope-inl.h"
 #include "interpreter/interpreter_common.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "runtime.h"
@@ -261,7 +264,7 @@
 
     UnstartedMemoryPeekShort(self, tmp.get(), &result, 0);
 
-    typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+    using unaligned_short __attribute__((__aligned__(1))) = int16_t;
     const unaligned_short* short_ptr = reinterpret_cast<const unaligned_short*>(base_ptr + i);
     EXPECT_EQ(result.GetS(), *short_ptr);
   }
@@ -284,7 +287,7 @@
 
     UnstartedMemoryPeekInt(self, tmp.get(), &result, 0);
 
-    typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+    using unaligned_int __attribute__((__aligned__(1))) = int32_t;
     const unaligned_int* int_ptr = reinterpret_cast<const unaligned_int*>(base_ptr + i);
     EXPECT_EQ(result.GetI(), *int_ptr);
   }
@@ -307,7 +310,7 @@
 
     UnstartedMemoryPeekLong(self, tmp.get(), &result, 0);
 
-    typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+    using unaligned_long __attribute__((__aligned__(1))) = int64_t;
     const unaligned_long* long_ptr = reinterpret_cast<const unaligned_long*>(base_ptr + i);
     EXPECT_EQ(result.GetJ(), *long_ptr);
   }
@@ -695,7 +698,7 @@
       {  ld2,  ld2 }
   };
 
-  TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+  TestCeilFloor(/* ceil= */ true, self, tmp.get(), test_pairs, arraysize(test_pairs));
 }
 
 TEST_F(UnstartedRuntimeTest, Floor) {
@@ -722,7 +725,7 @@
       {  ld2,  ld2 }
   };
 
-  TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+  TestCeilFloor(/* ceil= */ false, self, tmp.get(), test_pairs, arraysize(test_pairs));
 }
 
 TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 2f91f5d..57e81a7 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -219,6 +219,9 @@
   V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
   V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
   V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
+  V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
+  V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
+  V(CRC32UpdateByteBuffer, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "updateByteBuffer", "(IJII)I") \
   SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
 
 #endif  // ART_RUNTIME_INTRINSICS_LIST_H_
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 452a76b..8141ea2 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -19,8 +19,8 @@
 
 #include <iosfwd>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 
 namespace art {
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index 481aff9..d64f11f 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -23,13 +23,10 @@
 #include "android-base/stringprintf.h"
 
 #include "base/logging.h"  // For VLOG.
+#include "base/socket_peer_is_trusted.h"
 #include "jdwp/jdwp_priv.h"
 #include "thread-current-inl.h"
 
-#ifdef ART_TARGET_ANDROID
-#include "cutils/sockets.h"
-#endif
-
 /*
  * The JDWP <-> ADB transport protocol is explained in detail
  * in system/core/adb/jdwp_service.c. Here's a summary.
@@ -87,13 +84,13 @@
     }
   }
 
-  virtual bool Accept() REQUIRES(!state_lock_);
+  bool Accept() override REQUIRES(!state_lock_);
 
-  virtual bool Establish(const JdwpOptions*) {
+  bool Establish(const JdwpOptions*) override {
     return false;
   }
 
-  virtual void Shutdown() REQUIRES(!state_lock_) {
+  void Shutdown() override REQUIRES(!state_lock_) {
     int control_sock;
     int local_clientSock;
     {
@@ -116,7 +113,7 @@
     WakePipe();
   }
 
-  virtual bool ProcessIncoming() REQUIRES(!state_lock_);
+  bool ProcessIncoming() override REQUIRES(!state_lock_);
 
  private:
   int ReceiveClientFd() REQUIRES(!state_lock_);
@@ -265,7 +262,7 @@
       if (!ret) {
         int control_sock = ControlSock();
 #ifdef ART_TARGET_ANDROID
-        if (control_sock < 0 || !socket_peer_is_trusted(control_sock)) {
+        if (control_sock < 0 || !art::SocketPeerIsTrusted(control_sock)) {
           if (control_sock >= 0 && shutdown(control_sock, SHUT_RDWR)) {
             PLOG(ERROR) << "trouble shutting down socket";
           }
@@ -346,7 +343,7 @@
   if (!HaveFullPacket()) {
     /* read some more, looping until we have data */
     errno = 0;
-    while (1) {
+    while (true) {
       int selCount;
       fd_set readfds;
       int maxfd = -1;
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 0353ea7..7ce70cb 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -554,7 +554,7 @@
       break;
     default:
       LOG(FATAL) << "unknown mod kind " << pMod->modKind;
-      break;
+      UNREACHABLE();
     }
   }
   return true;
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 1e61ba0..37365ff 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -292,8 +292,7 @@
     expandBufAddUtf8String(pReply, str);
   }
 
-  std::vector<std::string> boot_class_path;
-  Split(Runtime::Current()->GetBootClassPathString(), ':', &boot_class_path);
+  std::vector<std::string> boot_class_path = Runtime::Current()->GetBootClassPath();
   expandBufAdd4BE(pReply, boot_class_path.size());
   for (const std::string& str : boot_class_path) {
     expandBufAddUtf8String(pReply, str);
@@ -1344,13 +1343,14 @@
   VLOG(jdwp) << StringPrintf("    --> event requestId=%#x", requestId);
 
   /* add it to the list */
+  // TODO: RegisterEvent() should take std::unique_ptr<>.
   JdwpError err = state->RegisterEvent(pEvent.get());
   if (err != ERR_NONE) {
     /* registration failed, probably because event is bogus */
     LOG(WARNING) << "WARNING: event request rejected";
     return err;
   }
-  pEvent.release();
+  pEvent.release();  // NOLINT b/117926937
   return ERR_NONE;
 }
 
@@ -1432,7 +1432,7 @@
 /*
  * Handler map decl.
  */
-typedef JdwpError (*JdwpRequestHandler)(JdwpState* state, Request* request, ExpandBuf* reply);
+using JdwpRequestHandler = JdwpError(*)(JdwpState* state, Request* request, ExpandBuf* reply);
 
 struct JdwpHandlerMap {
   uint8_t cmdSet;
diff --git a/runtime/jdwp/jdwp_request.cc b/runtime/jdwp/jdwp_request.cc
index 6af267e..a77962e 100644
--- a/runtime/jdwp/jdwp_request.cc
+++ b/runtime/jdwp/jdwp_request.cc
@@ -70,7 +70,7 @@
     case 2: value = Read2BE(); break;
     case 4: value = Read4BE(); break;
     case 8: value = Read8BE(); break;
-    default: LOG(FATAL) << width; break;
+    default: LOG(FATAL) << width;
   }
   return value;
 }
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index 673a942..b8b0e16 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -54,10 +54,10 @@
         remote_port_(0U) {
   }
 
-  virtual bool Accept();
-  virtual bool Establish(const JdwpOptions*);
-  virtual void Shutdown();
-  virtual bool ProcessIncoming();
+  bool Accept() override;
+  bool Establish(const JdwpOptions*) override;
+  void Shutdown() override;
+  bool ProcessIncoming() override;
 
  private:
   in_addr remote_addr_;
@@ -383,7 +383,7 @@
   if (!HaveFullPacket()) {
     /* read some more, looping until we have data */
     errno = 0;
-    while (1) {
+    while (true) {
       int selCount;
       fd_set readfds;
       int maxfd = -1;
diff --git a/runtime/jdwp_provider.h b/runtime/jdwp_provider.h
index c4f1989..29fbc3f 100644
--- a/runtime/jdwp_provider.h
+++ b/runtime/jdwp_provider.h
@@ -19,9 +19,7 @@
 
 #include <ios>
 
-#include "base/globals.h"
-#include "base/macros.h"
-#include "base/logging.h"
+#include "runtime_globals.h"
 
 namespace art {
 
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 63fb22c..99f9387 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -19,14 +19,18 @@
 #include <android-base/logging.h>
 
 #include "base/array_ref.h"
+#include "base/logging.h"
 #include "base/mutex.h"
 #include "base/time_utils.h"
+#include "base/utils.h"
+#include "dex/dex_file.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 
 #include <atomic>
-#include <unordered_map>
 #include <cstddef>
+#include <deque>
+#include <map>
 
 //
 // Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
@@ -76,12 +80,16 @@
 //
 
 namespace art {
+
+static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
+static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);
+
 extern "C" {
-  typedef enum {
+  enum JITAction {
     JIT_NOACTION = 0,
     JIT_REGISTER_FN,
     JIT_UNREGISTER_FN
-  } JITAction;
+  };
 
   struct JITCodeEntry {
     // Atomic to ensure the reader can always iterate over the linked list
@@ -126,14 +134,14 @@
   void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
 
   // The root data structure describing of all JITed methods.
-  JITDescriptor __jit_debug_descriptor {};
+  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};
 
   // The following globals mirror the ones above, but are used to register dex files.
   void __attribute__((noinline)) __dex_debug_register_code() {
     __asm__("");
   }
   void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
-  JITDescriptor __dex_debug_descriptor {};
+  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
 }
 
 // Mark the descriptor as "locked", so native tools know the data is being modified.
@@ -155,8 +163,16 @@
 static JITCodeEntry* CreateJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    const ArrayRef<const uint8_t>& symfile)
-    REQUIRES(Locks::native_debug_interface_lock_) {
+    ArrayRef<const uint8_t> symfile,
+    bool copy_symfile) {
+  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
+  if (copy_symfile) {
+    uint8_t* copy = new uint8_t[symfile.size()];
+    CHECK(copy != nullptr);
+    memcpy(copy, symfile.data(), symfile.size());
+    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
+  }
+
   // Ensure the timestamp is monotonically increasing even in presence of low
   // granularity system timer.  This ensures each entry has unique timestamp.
   uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());
@@ -188,9 +204,10 @@
 static void DeleteJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    JITCodeEntry* entry)
-    REQUIRES(Locks::native_debug_interface_lock_) {
+    JITCodeEntry* entry,
+    bool free_symfile) {
   CHECK(entry != nullptr);
+  const uint8_t* symfile = entry->symfile_addr_;
 
   // Ensure the timestamp is monotonically increasing even in presence of low
   // granularity system timer.  This ensures each entry has unique timestamp.
@@ -221,83 +238,174 @@
   memset(entry, 0, sizeof(*entry));
 
   delete entry;
+  if (free_symfile) {
+    delete[] symfile;
+  }
 }
 
-static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries GUARDED_BY(g_dex_debug_lock);
 
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  DCHECK(dexfile.data() != nullptr);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, g_dex_debug_lock);
+  DCHECK(dexfile != nullptr);
   // This is just defensive check. The class linker should not register the dex file twice.
-  if (__dex_debug_entries.count(dexfile.data()) == 0) {
+  if (g_dex_debug_entries.count(dexfile) == 0) {
+    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
     JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                      __dex_debug_register_code_ptr,
-                                                     dexfile);
-    __dex_debug_entries.emplace(dexfile.data(), entry);
+                                                     symfile,
+                                                     /*copy_symfile=*/ false);
+    g_dex_debug_entries.emplace(dexfile, entry);
   }
 }
 
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  auto it = __dex_debug_entries.find(dexfile.data());
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, g_dex_debug_lock);
+  auto it = g_dex_debug_entries.find(dexfile);
   // We register dex files in the class linker and free them in DexFile_closeDexFile, but
   // there might be cases where we load the dex file without using it in the class linker.
-  if (it != __dex_debug_entries.end()) {
+  if (it != g_dex_debug_entries.end()) {
     DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                                __dex_debug_register_code_ptr,
-                               it->second);
-    __dex_debug_entries.erase(it);
+                               /*entry=*/ it->second,
+                               /*free_symfile=*/ false);
+    g_dex_debug_entries.erase(it);
   }
 }
 
-static size_t __jit_debug_mem_usage
-    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
-
 // Mapping from handle to entry. Used to manage life-time of the entries.
-static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::multimap<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
 
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
+// Number of entries added since last packing.  Used to pack entries in bulk.
+static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;
+
+// We postpone removal so that it is done in bulk.
+static std::deque<const void*> g_jit_removed_entries GUARDED_BY(g_jit_debug_lock);
+
+// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry for each group.
+// The start address of method's code determines which group it belongs to.  The end is irrelevant.
+// As a consequence, newly added mini debug infos will be merged and old ones (GCed) will be pruned.
+static void MaybePackJitMiniDebugInfo(PackElfFileForJITFunction pack,
+                                      InstructionSet isa,
+                                      const InstructionSetFeatures* features)
+    REQUIRES(g_jit_debug_lock) {
+  // Size of memory range covered by each JITCodeEntry.
+  // The number of methods per entry is variable (depending on how many fit in that range).
+  constexpr uint32_t kGroupSize = 64 * KB;
+  // Even if there are no removed entries, we want to pack new entries on a regular basis.
+  constexpr uint32_t kPackFrequency = 64;
+
+  std::deque<const void*>& removed_entries = g_jit_removed_entries;
+  std::sort(removed_entries.begin(), removed_entries.end());
+  if (removed_entries.empty() && g_jit_num_unpacked_entries < kPackFrequency) {
+    return;  // Nothing to do.
+  }
+
+  std::vector<const uint8_t*> added_elf_files;
+  std::vector<const void*> removed_symbols;
+  auto added_it = g_jit_debug_entries.begin();
+  auto removed_it = removed_entries.begin();
+  while (added_it != g_jit_debug_entries.end()) {
+    // Collect all entries that have been added or removed within our memory range.
+    const void* group_ptr = AlignDown(added_it->first, kGroupSize);
+    added_elf_files.clear();
+    auto added_begin = added_it;
+    while (added_it != g_jit_debug_entries.end() &&
+           AlignDown(added_it->first, kGroupSize) == group_ptr) {
+      added_elf_files.push_back((added_it++)->second->symfile_addr_);
+    }
+    removed_symbols.clear();
+    while (removed_it != removed_entries.end() &&
+           AlignDown(*removed_it, kGroupSize) == group_ptr) {
+      removed_symbols.push_back(*(removed_it++));
+    }
+
+    // Create a new single JITCodeEntry that covers this memory range.
+    if (added_elf_files.size() == 1 && removed_symbols.size() == 0) {
+      continue;  // Nothing changed in this memory range.
+    }
+    uint64_t start_time = MilliTime();
+    size_t symbols;
+    std::vector<uint8_t> packed = pack(isa, features, added_elf_files, removed_symbols, &symbols);
+    VLOG(jit)
+        << "JIT mini-debug-info packed"
+        << " for " << group_ptr
+        << " in " << MilliTime() - start_time << "ms"
+        << " files=" << added_elf_files.size()
+        << " removed=" << removed_symbols.size()
+        << " symbols=" << symbols
+        << " size=" << PrettySize(packed.size());
+
+    // Replace the old entries with the new one (with their lifetime temporally overlapping).
+    JITCodeEntry* packed_entry = CreateJITCodeEntryInternal(
+        __jit_debug_descriptor,
+        __jit_debug_register_code_ptr,
+        ArrayRef<const uint8_t>(packed),
+        /*copy_symfile=*/ true);
+    for (auto it = added_begin; it != added_it; ++it) {
+      DeleteJITCodeEntryInternal(__jit_debug_descriptor,
+                                 __jit_debug_register_code_ptr,
+                                 /*entry=*/ it->second,
+                                 /*free_symfile=*/ true);
+    }
+    g_jit_debug_entries.erase(added_begin, added_it);
+    g_jit_debug_entries.emplace(group_ptr, packed_entry);
+  }
+  CHECK(added_it == g_jit_debug_entries.end());
+  CHECK(removed_it == removed_entries.end());
+  removed_entries.clear();
+  g_jit_num_unpacked_entries = 0;
+}
+
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile,
+                              PackElfFileForJITFunction pack,
+                              InstructionSet isa,
+                              const InstructionSetFeatures* features) {
+  MutexLock mu(self, g_jit_debug_lock);
   DCHECK_NE(symfile.size(), 0u);
 
-  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
-  uint8_t* copy = new uint8_t[symfile.size()];
-  CHECK(copy != nullptr);
-  memcpy(copy, symfile.data(), symfile.size());
+  MaybePackJitMiniDebugInfo(pack, isa, features);
 
   JITCodeEntry* entry = CreateJITCodeEntryInternal(
       __jit_debug_descriptor,
       __jit_debug_register_code_ptr,
-      ArrayRef<const uint8_t>(copy, symfile.size()));
-  __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
+      ArrayRef<const uint8_t>(symfile),
+      /*copy_symfile=*/ true);
 
-  // We don't provide handle for type debug info, which means we cannot free it later.
+  VLOG(jit)
+      << "JIT mini-debug-info added"
+      << " for " << code_ptr
+      << " size=" << PrettySize(symfile.size());
+
+  // We don't provide code_ptr for type debug info, which means we cannot free it later.
   // (this only happens when --generate-debug-info flag is enabled for the purpose
   // of being debugged with gdb; it does not happen for debuggable apps by default).
-  bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
-  DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
-}
-
-void RemoveNativeDebugInfoForJit(const void* handle) {
-  auto it = __jit_debug_entries.find(handle);
-  // We generate JIT native debug info only if the right runtime flags are enabled,
-  // but we try to remove it unconditionally whenever code is freed from JIT cache.
-  if (it != __jit_debug_entries.end()) {
-    JITCodeEntry* entry = it->second;
-    const uint8_t* symfile_addr = entry->symfile_addr_;
-    uint64_t symfile_size = entry->symfile_size_;
-    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
-                               __jit_debug_register_code_ptr,
-                               entry);
-    __jit_debug_entries.erase(it);
-    __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
-    delete[] symfile_addr;
+  if (code_ptr != nullptr) {
+    g_jit_debug_entries.emplace(code_ptr, entry);
+    // Count how many entries we have added since the last mini-debug-info packing.
+    // We avoid g_jit_debug_entries.size() here because it can shrink during packing.
+    g_jit_num_unpacked_entries++;
   }
 }
 
-size_t GetJitNativeDebugInfoMemUsage() {
-  return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
+  MutexLock mu(self, g_jit_debug_lock);
+  // We generate JIT native debug info only if the right runtime flags are enabled,
+  // but we try to remove it unconditionally whenever code is freed from JIT cache.
+  if (!g_jit_debug_entries.empty()) {
+    g_jit_removed_entries.push_back(code_ptr);
+  }
+}
+
+size_t GetJitMiniDebugInfoMemUsage() {
+  MutexLock mu(Thread::Current(), g_jit_debug_lock);
+  size_t size = 0;
+  for (auto entry : g_jit_debug_entries) {
+    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
+  }
+  return size;
 }
 
 }  // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 3d25910..17beb4b 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,35 +18,45 @@
 #define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 
 #include <inttypes.h>
-#include <memory>
 #include <vector>
 
-#include "base/array_ref.h"
-#include "base/mutex.h"
+#include "arch/instruction_set_features.h"
+#include "base/locks.h"
 
 namespace art {
 
+class DexFile;
+class Thread;
+
+// This method is declared in the compiler library.
+// We need to pass it by pointer to be able to call it from runtime.
+typedef std::vector<uint8_t> PackElfFileForJITFunction(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols);
+
 // Notify native tools (e.g. libunwind) that DEX file has been opened.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
 
 // Notify native tools (e.g. libunwind) that DEX file has been closed.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
 
-// Notify native tools about new JITed code by passing in-memory ELF.
-// The handle is the object that is being described (needed to be able to remove the entry).
+// Notify native tools (e.g. libunwind) that JIT has compiled a new method.
+// The method will make a copy of the passed ELF file (to shrink it to the minimum size).
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile)
-    REQUIRES(Locks::native_debug_interface_lock_);
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile,
+                              PackElfFileForJITFunction pack,
+                              InstructionSet isa,
+                              const InstructionSetFeatures* features);
 
-// Notify native debugger that JITed code has been removed and free the debug info.
-void RemoveNativeDebugInfoForJit(const void* handle)
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Notify native tools (e.g. libunwind) that JIT code has been garbage collected.
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr);
 
-// Returns approximate memory used by all JITCodeEntries.
-size_t GetJitNativeDebugInfoMemUsage()
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Returns approximate memory used by debug info for JIT code.
+size_t GetJitMiniDebugInfoMemUsage();
 
 }  // namespace art
 
diff --git a/runtime/jit/jit-inl.h b/runtime/jit/jit-inl.h
new file mode 100644
index 0000000..80324ad
--- /dev/null
+++ b/runtime/jit/jit-inl.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_INL_H_
+#define ART_RUNTIME_JIT_JIT_INL_H_
+
+#include "jit/jit.h"
+
+#include "art_method.h"
+#include "base/bit_utils.h"
+#include "thread.h"
+#include "runtime-inl.h"
+
+namespace art {
+namespace jit {
+
+inline bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
+  return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
+}
+
+inline void Jit::AddSamples(Thread* self,
+                            ArtMethod* method,
+                            uint16_t samples,
+                            bool with_backedges) {
+  if (Jit::ShouldUsePriorityThreadWeight(self)) {
+    samples *= PriorityThreadWeight();
+  }
+  uint32_t old_count = method->GetCounter();
+  uint32_t new_count = old_count + samples;
+
+  // The full check is fairly expensive so we just add to hotness most of the time,
+  // and we do the full check only when some of the higher bits of the count change.
+  // NB: The method needs to see the transitions of the counter past the thresholds.
+  uint32_t old_batch = RoundDown(old_count, kJitSamplesBatchSize);  // Clear lower bits.
+  uint32_t new_batch = RoundDown(new_count, kJitSamplesBatchSize);  // Clear lower bits.
+  if (UNLIKELY(old_batch == 0)) {
+    // For low sample counts, we check every time (which is important for tests).
+    if (!MaybeCompileMethod(self, method, old_count, new_count, with_backedges)) {
+      // Tests may check that the counter is 0 for methods that we never compile.
+      return;  // Ignore the samples for now and retry later.
+    }
+  } else if (UNLIKELY(old_batch != new_batch)) {
+    // For high sample counts, we check only when we move past the batch boundary.
+    if (!MaybeCompileMethod(self, method, old_batch, new_batch, with_backedges)) {
+      // OSR compilation will ignore the samples if they don't have backedges.
+      return;  // Ignore the samples for now and retry later.
+    }
+  }
+
+  method->SetCounter(new_count);
+}
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_INL_H_
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d831628..d44bd59 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -28,6 +28,7 @@
 #include "debugger.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "interpreter/interpreter.h"
+#include "jit-inl.h"
 #include "jit_code_cache.h"
 #include "jni/java_vm_ext.h"
 #include "mirror/method_handle_impl.h"
@@ -56,17 +57,26 @@
 // JIT compiler
 void* Jit::jit_library_handle_ = nullptr;
 void* Jit::jit_compiler_handle_ = nullptr;
-void* (*Jit::jit_load_)(bool*) = nullptr;
+void* (*Jit::jit_load_)(void) = nullptr;
 void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
 void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
-bool Jit::generate_debug_info_ = false;
+bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
+void (*Jit::jit_update_options_)(void*) = nullptr;
 
 struct StressModeHelper {
   DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
 };
 DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);
 
+uint32_t JitOptions::RoundUpThreshold(uint32_t threshold) {
+  if (threshold > kJitSamplesBatchSize) {
+    threshold = RoundUp(threshold, kJitSamplesBatchSize);
+  }
+  CHECK_LE(threshold, std::numeric_limits<uint16_t>::max());
+  return threshold;
+}
+
 JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
   auto* jit_options = new JitOptions;
   jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
@@ -92,30 +102,25 @@
                    : kJitStressDefaultCompileThreshold)
             : kJitDefaultCompileThreshold;
   }
-  if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
-    LOG(FATAL) << "Method compilation threshold is above its internal limit.";
-  }
+  jit_options->compile_threshold_ = RoundUpThreshold(jit_options->compile_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
     jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
-    if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      LOG(FATAL) << "Method warmup threshold is above its internal limit.";
-    }
   } else {
     jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
   }
+  jit_options->warmup_threshold_ = RoundUpThreshold(jit_options->warmup_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
     jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
-    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
-    }
   } else {
     jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
     if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
+      jit_options->osr_threshold_ =
+          RoundDown(std::numeric_limits<uint16_t>::max(), kJitSamplesBatchSize);
     }
   }
+  jit_options->osr_threshold_ = RoundUpThreshold(jit_options->osr_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
     jit_options->priority_thread_weight_ =
@@ -148,10 +153,6 @@
   return jit_options;
 }
 
-bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
-  return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
-}
-
 void Jit::DumpInfo(std::ostream& os) {
   code_cache_->Dump(os);
   cumulative_timings_.Dump(os);
@@ -168,41 +169,55 @@
   cumulative_timings_.AddLogger(logger);
 }
 
-Jit::Jit(JitOptions* options) : options_(options),
-                                cumulative_timings_("JIT timings"),
-                                memory_use_("Memory used for compilation", 16),
-                                lock_("JIT memory use lock") {}
+Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
+    : code_cache_(code_cache),
+      options_(options),
+      cumulative_timings_("JIT timings"),
+      memory_use_("Memory used for compilation", 16),
+      lock_("JIT memory use lock") {}
 
-Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
-  DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
-  std::unique_ptr<Jit> jit(new Jit(options));
-  if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
+Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
+  if (jit_load_ == nullptr) {
+    LOG(WARNING) << "Not creating JIT: library not loaded";
     return nullptr;
   }
-  bool code_cache_only_for_profile_data = !options->UseJitCompilation();
-  jit->code_cache_.reset(JitCodeCache::Create(
-      options->GetCodeCacheInitialCapacity(),
-      options->GetCodeCacheMaxCapacity(),
-      jit->generate_debug_info_,
-      code_cache_only_for_profile_data,
-      error_msg));
-  if (jit->GetCodeCache() == nullptr) {
+  jit_compiler_handle_ = (jit_load_)();
+  if (jit_compiler_handle_ == nullptr) {
+    LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
     return nullptr;
   }
+  std::unique_ptr<Jit> jit(new Jit(code_cache, options));
+
+  // If the code collector is enabled, check if that still holds:
+  // With 'perf', we want a 1-1 mapping between an address and a method.
+  // We aren't able to keep method pointers live during the instrumentation method entry trampoline
+  // so we will just disable jit-gc if we are doing that.
+  if (code_cache->GetGarbageCollectCode()) {
+    code_cache->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+        !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+  }
+
   VLOG(jit) << "JIT created with initial_capacity="
       << PrettySize(options->GetCodeCacheInitialCapacity())
       << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
       << ", compile_threshold=" << options->GetCompileThreshold()
       << ", profile_saver_options=" << options->GetProfileSaverOptions();
 
-
-  jit->CreateThreadPool();
-
   // Notify native debugger about the classes already loaded before the creation of the jit.
   jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
   return jit.release();
 }
 
+template <typename T>
+bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
+  *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
+  if (*address == nullptr) {
+    *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
+    return false;
+  }
+  return true;
+}
+
 bool Jit::LoadCompilerLibrary(std::string* error_msg) {
   jit_library_handle_ = dlopen(
       kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
@@ -212,54 +227,22 @@
     *error_msg = oss.str();
     return false;
   }
-  jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
-  if (jit_load_ == nullptr) {
+  bool all_resolved = true;
+  all_resolved = all_resolved && LoadSymbol(&jit_load_, "jit_load", error_msg);
+  all_resolved = all_resolved && LoadSymbol(&jit_unload_, "jit_unload", error_msg);
+  all_resolved = all_resolved && LoadSymbol(&jit_compile_method_, "jit_compile_method", error_msg);
+  all_resolved = all_resolved && LoadSymbol(&jit_types_loaded_, "jit_types_loaded", error_msg);
+  all_resolved = all_resolved && LoadSymbol(&jit_update_options_, "jit_update_options", error_msg);
+  all_resolved = all_resolved &&
+      LoadSymbol(&jit_generate_debug_info_, "jit_generate_debug_info", error_msg);
+  if (!all_resolved) {
     dlclose(jit_library_handle_);
-    *error_msg = "JIT couldn't find jit_load entry point";
-    return false;
-  }
-  jit_unload_ = reinterpret_cast<void (*)(void*)>(
-      dlsym(jit_library_handle_, "jit_unload"));
-  if (jit_unload_ == nullptr) {
-    dlclose(jit_library_handle_);
-    *error_msg = "JIT couldn't find jit_unload entry point";
-    return false;
-  }
-  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
-      dlsym(jit_library_handle_, "jit_compile_method"));
-  if (jit_compile_method_ == nullptr) {
-    dlclose(jit_library_handle_);
-    *error_msg = "JIT couldn't find jit_compile_method entry point";
-    return false;
-  }
-  jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
-      dlsym(jit_library_handle_, "jit_types_loaded"));
-  if (jit_types_loaded_ == nullptr) {
-    dlclose(jit_library_handle_);
-    *error_msg = "JIT couldn't find jit_types_loaded entry point";
     return false;
   }
   return true;
 }
 
-bool Jit::LoadCompiler(std::string* error_msg) {
-  if (jit_library_handle_ == nullptr && !LoadCompilerLibrary(error_msg)) {
-    return false;
-  }
-  bool will_generate_debug_symbols = false;
-  VLOG(jit) << "Calling JitLoad interpreter_only="
-      << Runtime::Current()->GetInstrumentation()->InterpretOnly();
-  jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
-  if (jit_compiler_handle_ == nullptr) {
-    dlclose(jit_library_handle_);
-    *error_msg = "JIT couldn't load compiler";
-    return false;
-  }
-  generate_debug_info_ = will_generate_debug_symbols;
-  return true;
-}
-
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
   DCHECK(Runtime::Current()->UseJitCompilation());
   DCHECK(!method->IsRuntimeMethod());
 
@@ -289,7 +272,7 @@
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
             << " osr=" << std::boolalpha << osr;
-  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
+  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
   code_cache_->DoneCompiling(method_to_compile, self, osr);
   if (!success) {
     VLOG(jit) << "Failed to compile method "
@@ -308,16 +291,10 @@
   return success;
 }
 
-void Jit::CreateThreadPool() {
-  // There is a DCHECK in the 'AddSamples' method to ensure the tread pool
-  // is not null when we instrument.
-
-  // We need peers as we may report the JIT thread, e.g., in the debugger.
-  constexpr bool kJitPoolNeedsPeers = true;
-  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
-
-  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
-  Start();
+void Jit::WaitForWorkersToBeCreated() {
+  if (thread_pool_ != nullptr) {
+    thread_pool_->WaitForWorkersToBeCreated();
+  }
 }
 
 void Jit::DeleteThreadPool() {
@@ -347,10 +324,7 @@
 void Jit::StartProfileSaver(const std::string& filename,
                             const std::vector<std::string>& code_paths) {
   if (options_->GetSaveProfilingInfo()) {
-    ProfileSaver::Start(options_->GetProfileSaverOptions(),
-                        filename,
-                        code_cache_.get(),
-                        code_paths);
+    ProfileSaver::Start(options_->GetProfileSaverOptions(), filename, code_cache_, code_paths);
   }
 }
 
@@ -391,7 +365,7 @@
     return;
   }
   jit::Jit* jit = Runtime::Current()->GetJit();
-  if (jit->generate_debug_info_) {
+  if (jit_generate_debug_info_(jit->jit_compiler_handle_)) {
     DCHECK(jit->jit_types_loaded_ != nullptr);
     jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
   }
@@ -406,7 +380,7 @@
     std::vector<mirror::Class*> classes_;
   };
 
-  if (generate_debug_info_) {
+  if (jit_generate_debug_info_(jit_compiler_handle_)) {
     ScopedObjectAccess so(Thread::Current());
 
     CollectClasses visitor;
@@ -578,10 +552,11 @@
 
 class JitCompileTask final : public Task {
  public:
-  enum TaskKind {
+  enum class TaskKind {
     kAllocateProfile,
     kCompile,
-    kCompileOsr
+    kCompileBaseline,
+    kCompileOsr,
   };
 
   JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
@@ -598,14 +573,22 @@
 
   void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
-    if (kind_ == kCompile) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
-    } else if (kind_ == kCompileOsr) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
-    } else {
-      DCHECK(kind_ == kAllocateProfile);
-      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
-        VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+    switch (kind_) {
+      case TaskKind::kCompile:
+      case TaskKind::kCompileBaseline:
+      case TaskKind::kCompileOsr: {
+        Runtime::Current()->GetJit()->CompileMethod(
+            method_,
+            self,
+            /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
+            /* osr= */ (kind_ == TaskKind::kCompileOsr));
+        break;
+      }
+      case TaskKind::kAllocateProfile: {
+        if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+          VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+        }
+        break;
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -623,6 +606,18 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
 
+void Jit::CreateThreadPool() {
+  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
+  // is not null when we instrument.
+
+  // We need peers as we may report the JIT thread, e.g., in the debugger.
+  constexpr bool kJitPoolNeedsPeers = true;
+  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
+  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
+  Start();
+}
+
 static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (method->IsClassInitializer() || !method->IsCompilable()) {
     // We do not want to compile such methods.
@@ -644,18 +639,24 @@
   return false;
 }
 
-void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
+bool Jit::MaybeCompileMethod(Thread* self,
+                             ArtMethod* method,
+                             uint32_t old_count,
+                             uint32_t new_count,
+                             bool with_backedges) {
   if (thread_pool_ == nullptr) {
-    // Should only see this when shutting down.
-    DCHECK(Runtime::Current()->IsShuttingDown(self));
-    return;
+    // Should only see this when shutting down, starting up, or in safe mode.
+    DCHECK(Runtime::Current()->IsShuttingDown(self) ||
+           !Runtime::Current()->IsFinishedStarting() ||
+           Runtime::Current()->IsSafeMode());
+    return false;
   }
   if (IgnoreSamplesForMethod(method)) {
-    return;
+    return false;
   }
   if (HotMethodThreshold() == 0) {
     // Tests might request JIT on first use (compiled synchronously in the interpreter).
-    return;
+    return false;
   }
   DCHECK(thread_pool_ != nullptr);
   DCHECK_GT(WarmMethodThreshold(), 0);
@@ -664,16 +665,10 @@
   DCHECK_GE(PriorityThreadWeight(), 1);
   DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
 
-  uint16_t starting_count = method->GetCounter();
-  if (Jit::ShouldUsePriorityThreadWeight(self)) {
-    count *= PriorityThreadWeight();
-  }
-  uint32_t new_count = starting_count + count;
-  // Note: Native method have no "warm" state or profiling info.
-  if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
-    if ((new_count >= WarmMethodThreshold()) &&
-        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
-      bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
+  if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
+    // Note: Native method have no "warm" state or profiling info.
+    if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
+      bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
       if (success) {
         VLOG(jit) << "Start profiling " << method->PrettyMethod();
       }
@@ -682,42 +677,55 @@
         // Calling ProfilingInfo::Create might put us in a suspended state, which could
         // lead to the thread pool being deleted when we are shutting down.
         DCHECK(Runtime::Current()->IsShuttingDown(self));
-        return;
+        return false;
       }
 
       if (!success) {
         // We failed allocating. Instead of doing the collection on the Java thread, we push
         // an allocation to a compiler thread, that will do the collection.
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
-      }
-    }
-    // Avoid jumping more than one state at a time.
-    new_count = std::min(new_count, static_cast<uint32_t>(HotMethodThreshold() - 1));
-  } else if (UseJitCompilation()) {
-    if (starting_count < HotMethodThreshold()) {
-      if ((new_count >= HotMethodThreshold()) &&
-          !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
-        DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
-      }
-      // Avoid jumping more than one state at a time.
-      new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
-    } else if (starting_count < OSRMethodThreshold()) {
-      if (!with_backedges) {
-        // If the samples don't contain any back edge, we don't increment the hotness.
-        return;
-      }
-      DCHECK(!method->IsNative());  // No back edges reported for native methods.
-      if ((new_count >= OSRMethodThreshold()) &&  !code_cache_->IsOsrCompiled(method)) {
-        DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
       }
     }
   }
-  // Update hotness counter
-  method->SetCounter(new_count);
+  if (UseJitCompilation()) {
+    if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
+      if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+        DCHECK(thread_pool_ != nullptr);
+        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+      }
+    }
+    if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
+      if (!with_backedges) {
+        return false;
+      }
+      DCHECK(!method->IsNative());  // No back edges reported for native methods.
+      if (!code_cache_->IsOsrCompiled(method)) {
+        DCHECK(thread_pool_ != nullptr);
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
+      }
+    }
+  }
+  return true;
 }
 
+class ScopedSetRuntimeThread {
+ public:
+  explicit ScopedSetRuntimeThread(Thread* self)
+      : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
+    self_->SetIsRuntimeThread(true);
+  }
+
+  ~ScopedSetRuntimeThread() {
+    self_->SetIsRuntimeThread(was_runtime_thread_);
+  }
+
+ private:
+  Thread* self_;
+  bool was_runtime_thread_;
+};
+
 void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
   Runtime* runtime = Runtime::Current();
   if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
@@ -725,9 +733,11 @@
     if (np_method->IsCompilable()) {
       if (!np_method->IsNative()) {
         // The compiler requires a ProfilingInfo object for non-native methods.
-        ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
       }
-      JitCompileTask compile_task(method, JitCompileTask::kCompile);
+      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
+      // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
+      ScopedSetRuntimeThread ssrt(thread);
       compile_task.Run(thread);
     }
     return;
@@ -743,7 +753,7 @@
     Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
         method, profiling_info->GetSavedEntryPoint());
   } else {
-    AddSamples(thread, method, 1, /* with_backedges */false);
+    AddSamples(thread, method, 1, /* with_backedges= */false);
   }
 }
 
@@ -793,5 +803,46 @@
   }
 }
 
+void Jit::PostForkChildAction(bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
+  if (Runtime::Current()->IsSafeMode()) {
+    // Delete the thread pool, we are not going to JIT.
+    thread_pool_.reset(nullptr);
+    return;
+  }
+  // At this point, the compiler options have been adjusted to the particular configuration
+  // of the forked child. Parse them again.
+  jit_update_options_(jit_compiler_handle_);
+
+  // Adjust the status of code cache collection: the status from zygote was to not collect.
+  code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+
+  if (thread_pool_ != nullptr) {
+    // Remove potential tasks that have been inherited from the zygote.
+    thread_pool_->RemoveAllTasks(Thread::Current());
+
+    // Resume JIT compilation.
+    thread_pool_->CreateThreads();
+  }
+}
+
+void Jit::PreZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->DeleteThreads();
+}
+
+void Jit::PostZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->CreateThreads();
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index edaf348..714db3a 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -47,6 +47,7 @@
 // At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
 // See android/os/Process.java.
 static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+static constexpr uint32_t kJitSamplesBatchSize = 32;  // Must be power of 2.
 
 class JitOptions {
  public:
@@ -122,12 +123,16 @@
   }
 
  private:
+  // We add the sample in batches of size kJitSamplesBatchSize.
+  // This method rounds the threshold so that it is a multiple of the batch size.
+  static uint32_t RoundUpThreshold(uint32_t threshold);
+
   bool use_jit_compilation_;
   size_t code_cache_initial_capacity_;
   size_t code_cache_max_capacity_;
-  uint16_t compile_threshold_;
-  uint16_t warmup_threshold_;
-  uint16_t osr_threshold_;
+  uint32_t compile_threshold_;
+  uint32_t warmup_threshold_;
+  uint32_t osr_threshold_;
   uint16_t priority_thread_weight_;
   uint16_t invoke_transition_weight_;
   bool dump_info_on_shutdown_;
@@ -154,23 +159,28 @@
   static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
   static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
   // How frequently should the interpreter check to see if OSR compilation is ready.
-  static constexpr int16_t kJitRecheckOSRThreshold = 100;
+  static constexpr int16_t kJitRecheckOSRThreshold = 101;  // Prime number to avoid patterns.
 
   virtual ~Jit();
-  static Jit* Create(JitOptions* options, std::string* error_msg);
-  bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
+
+  // Create JIT itself.
+  static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
+
+  bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CreateThreadPool();
 
   const JitCodeCache* GetCodeCache() const {
-    return code_cache_.get();
+    return code_cache_;
   }
 
   JitCodeCache* GetCodeCache() {
-    return code_cache_.get();
+    return code_cache_;
   }
 
+  void CreateThreadPool();
   void DeleteThreadPool();
+  void WaitForWorkersToBeCreated();
+
   // Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
   // loggers.
   void DumpInfo(std::ostream& os) REQUIRES(!lock_);
@@ -213,7 +223,10 @@
   void MethodEntered(Thread* thread, ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
+  ALWAYS_INLINE void AddSamples(Thread* self,
+                                ArtMethod* method,
+                                uint16_t samples,
+                                bool with_backedges)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
@@ -268,6 +281,7 @@
                                         JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Load the compiler library.
   static bool LoadCompilerLibrary(std::string* error_msg);
 
   ThreadPool* GetThreadPool() const {
@@ -280,25 +294,44 @@
   // Start JIT threads.
   void Start();
 
- private:
-  explicit Jit(JitOptions* options);
+  // Transition to a child state.
+  void PostForkChildAction(bool is_zygote);
 
-  static bool LoadCompiler(std::string* error_msg);
+  // Prepare for forking.
+  void PreZygoteFork();
+
+  // Adjust state after forking.
+  void PostZygoteFork();
+
+ private:
+  Jit(JitCodeCache* code_cache, JitOptions* options);
+
+  // Compile the method if the number of samples passes a threshold.
+  // Returns false if we cannot compile now - don't increment the counter and retry later.
+  bool MaybeCompileMethod(Thread* self,
+                          ArtMethod* method,
+                          uint32_t old_count,
+                          uint32_t new_count,
+                          bool with_backedges)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static bool BindCompilerMethods(std::string* error_msg);
 
   // JIT compiler
   static void* jit_library_handle_;
   static void* jit_compiler_handle_;
-  static void* (*jit_load_)(bool*);
+  static void* (*jit_load_)(void);
   static void (*jit_unload_)(void*);
-  static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
+  static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
   static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
+  static void (*jit_update_options_)(void*);
+  static bool (*jit_generate_debug_info_)(void*);
+  template <typename T> static bool LoadSymbol(T*, const char* symbol, std::string* error_msg);
 
-  // We make this static to simplify the interaction with libart-compiler.so.
-  static bool generate_debug_info_;
-
+  // JIT resources owned by runtime.
+  jit::JitCodeCache* const code_cache_;
   const JitOptions* const options_;
 
-  std::unique_ptr<jit::JitCodeCache> code_cache_;
   std::unique_ptr<ThreadPool> thread_pool_;
 
   // Performance monitoring.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 184aba8..c1b9a1a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,11 +18,16 @@
 
 #include <sstream>
 
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
+
 #include "arch/context.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/histogram-inl.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/membarrier.h"
+#include "base/memfd.h"
 #include "base/mem_map.h"
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
@@ -35,6 +40,7 @@
 #include "dex/method_reference.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/bitmap-inl.h"
+#include "gc/allocator/dlmalloc.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "handle.h"
 #include "instrumentation.h"
@@ -51,16 +57,37 @@
 #include "thread-current-inl.h"
 #include "thread_list.h"
 
+using android::base::unique_fd;
+
 namespace art {
 namespace jit {
 
-static constexpr int kProtCode = PROT_READ | PROT_EXEC;
-static constexpr int kProtData = PROT_READ | PROT_WRITE;
-static constexpr int kProtProfile = PROT_READ;
-
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
+// Data cache will be half of the capacity.
+// Code cache will be the other half of the capacity.
+// TODO: Make this variable?
+static constexpr size_t kCodeAndDataCapacityDivider = 2;
+
+static constexpr int kProtR = PROT_READ;
+static constexpr int kProtRW = PROT_READ | PROT_WRITE;
+static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtRX = PROT_READ | PROT_EXEC;
+
+namespace {
+
+// Translate an address belonging to one memory map into an address in a second. This is useful
+// when there are two virtual memory ranges for the same physical memory range.
+template <typename T>
+T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
+  CHECK(src.HasAddress(src_ptr));
+  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
+  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
+}
+
+}  // namespace
+
 class JitCodeCache::JniStubKey {
  public:
   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -162,20 +189,187 @@
   std::vector<ArtMethod*> methods_;
 };
 
-JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
-                                   size_t max_capacity,
-                                   bool generate_debug_info,
-                                   bool used_only_for_profile_data,
-                                   std::string* error_msg) {
+bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
+                                      bool is_zygote,
+                                      std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  CHECK_GE(max_capacity, initial_capacity);
 
-  // With 'perf', we want a 1-1 mapping between an address and a method.
-  // We aren't able to keep method pointers live during the instrumentation method entry trampoline
-  // so we will just disable jit-gc if we are doing that.
-  bool garbage_collect_code = !generate_debug_info &&
-      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
+  const size_t capacity = max_capacity_;
+  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
+  const size_t exec_capacity = capacity - data_capacity;
 
+  // File descriptor enabling dual-view mapping of code section.
+  unique_fd mem_fd;
+
+  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
+  // for it.
+  if (!is_zygote) {
+    // Bionic supports memfd_create, but the call may fail on older kernels.
+    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
+    if (mem_fd.get() < 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+      if (!rwx_memory_allowed) {
+        // Without using RWX page permissions, the JIT cannot fall back to single mapping as it
+        // requires transitioning the code pages to RWX for updates.
+        *error_msg = oss.str();
+        return false;
+      }
+      VLOG(jit) << oss.str();
+    }
+  }
+
+  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
+    std::ostringstream oss;
+    oss << "Failed to initialize memory file: " << strerror(errno);
+    *error_msg = oss.str();
+    return false;
+  }
+
+  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
+  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
+
+  std::string error_str;
+  // Map name specific for android_os_Debug.cpp accounting.
+  // Map in low 4gb to simplify accessing root tables for x86_64.
+  // We could do PC-relative addressing to avoid this problem, but that
+  // would require reserving code and data area before submitting, which
+  // means more windows for the code memory to be RWX.
+  int base_flags;
+  MemMap data_pages;
+  if (mem_fd.get() >= 0) {
+    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
+    // enable dual mapping - we'll create a second mapping using the descriptor below. The
+    // mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+
+    //       | non exec code |\
+    //       +---------------+ \
+    //       :               :\ \
+    //       +---------------+.\.+---------------+
+    //       |  exec code    |  \|     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the non-executable view of the code
+    // cache, and the executable view of the code cache has fixed RX memory protections.
+    //
+    // This memory needs to be mapped shared as the code portions will have two mappings.
+    base_flags = MAP_SHARED;
+    data_pages = MemMap::MapFile(
+        data_capacity + exec_capacity,
+        kProtRW,
+        base_flags,
+        mem_fd,
+        /* start= */ 0,
+        /* low_4gb= */ true,
+        data_cache_name.c_str(),
+        &error_str);
+  } else {
+    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and JIT code pages. The mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+...+---------------+
+    //       |  exec code    |   |     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the executable view of the code cache,
+    // and the executable view of the code cache transitions RX to RWX for the update and then
+    // back to RX after the update.
+    base_flags = MAP_PRIVATE | MAP_ANON;
+    data_pages = MemMap::MapAnonymous(
+        data_cache_name.c_str(),
+        data_capacity + exec_capacity,
+        kProtRW,
+        /* low_4gb= */ true,
+        &error_str);
+  }
+
+  if (!data_pages.IsValid()) {
+    std::ostringstream oss;
+    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
+    *error_msg = oss.str();
+    return false;
+  }
+
+  MemMap exec_pages;
+  MemMap non_exec_pages;
+  if (exec_capacity > 0) {
+    uint8_t* const divider = data_pages.Begin() + data_capacity;
+    // Set initial permission for executable view to catch any SELinux permission problems early
+    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
+    // executable as there is no code in the cache yet.
+    exec_pages = data_pages.RemapAtEnd(divider,
+                                       exec_cache_name.c_str(),
+                                       kProtRX,
+                                       base_flags | MAP_FIXED,
+                                       mem_fd.get(),
+                                       (mem_fd.get() >= 0) ? data_capacity : 0,
+                                       &error_str);
+    if (!exec_pages.IsValid()) {
+      std::ostringstream oss;
+      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
+      *error_msg = oss.str();
+      return false;
+    }
+
+    if (mem_fd.get() >= 0) {
+      // For dual view, create the secondary view of code memory used for updating code. This view
+      // is never executable.
+      std::string name = exec_cache_name + "-rw";
+      non_exec_pages = MemMap::MapFile(exec_capacity,
+                                       kProtR,
+                                       base_flags,
+                                       mem_fd,
+                                       /* start= */ data_capacity,
+                                       /* low_4gb= */ false,
+                                       name.c_str(),
+                                       &error_str);
+      if (!non_exec_pages.IsValid()) {
+        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
+        if (rwx_memory_allowed) {
+          // Log and continue as single view JIT (requires RWX memory).
+          VLOG(jit) << kFailedNxView;
+        } else {
+          *error_msg = kFailedNxView;
+          return false;
+        }
+      }
+    }
+  } else {
+    // Profiling only. No memory for code required.
+  }
+
+  data_pages_ = std::move(data_pages);
+  exec_pages_ = std::move(exec_pages);
+  non_exec_pages_ = std::move(non_exec_pages);
+  return true;
+}
+
+JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
+                                   bool rwx_memory_allowed,
+                                   bool is_zygote,
+                                   std::string* error_msg) {
+  // Register for membarrier expedited sync core if JIT will be generating code.
+  if (!used_only_for_profile_data) {
+    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
+      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
+      // flushed and it's used when adding code to the JIT. The memory used by the new code may
+      // have just been released and, in theory, the old code could still be in a pipeline.
+      VLOG(jit) << "Kernel does not support membarrier sync-core";
+    }
+  }
+
+  // Check whether the provided max capacity in options is below 1GB.
+  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
   // We need to have 32 bit offsets from method headers in code cache which point to things
   // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
   // Ensure we're below 1 GB to be safe.
@@ -187,88 +381,38 @@
     return nullptr;
   }
 
-  // Decide how we should map the code and data sections.
-  // If we use the code cache just for profiling we do not need to map the code section as
-  // executable.
-  // NOTE 1: this is yet another workaround to bypass strict SElinux policies in order to be able
-  //         to profile system server.
-  // NOTE 2: We could just not create the code section at all but we will need to
-  //         special case too many cases.
-  int memmap_flags_prot_code = used_only_for_profile_data ? kProtProfile : kProtCode;
+  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
 
-  std::string error_str;
-  // Map name specific for android_os_Debug.cpp accounting.
-  // Map in low 4gb to simplify accessing root tables for x86_64.
-  // We could do PC-relative addressing to avoid this problem, but that
-  // would require reserving code and data area before submitting, which
-  // means more windows for the code memory to be RWX.
-  MemMap data_map = MemMap::MapAnonymous(
-      "data-code-cache",
-      /* addr */ nullptr,
-      max_capacity,
-      kProtData,
-      /* low_4gb */ true,
-      /* reuse */ false,
-      /* reservation */ nullptr,
-      &error_str);
-  if (!data_map.IsValid()) {
-    std::ostringstream oss;
-    oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
-    *error_msg = oss.str();
+  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
+
+  MutexLock mu(Thread::Current(), jit_code_cache->lock_);
+  jit_code_cache->InitializeState(initial_capacity, max_capacity);
+
+  // Zygote should never collect code to share the memory with the children.
+  if (is_zygote) {
+    jit_code_cache->garbage_collect_code_ = false;
+  }
+
+  if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
     return nullptr;
   }
 
-  // Align both capacities to page size, as that's the unit mspaces use.
-  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
-  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+  jit_code_cache->InitializeSpaces();
 
-  // Data cache is 1 / 2 of the map.
-  // TODO: Make this variable?
-  size_t data_size = max_capacity / 2;
-  size_t code_size = max_capacity - data_size;
-  DCHECK_EQ(code_size + data_size, max_capacity);
-  uint8_t* divider = data_map.Begin() + data_size;
+  VLOG(jit) << "Created jit code cache: initial capacity="
+            << PrettySize(initial_capacity)
+            << ", maximum capacity="
+            << PrettySize(max_capacity);
 
-  MemMap code_map = data_map.RemapAtEnd(
-      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
-  if (!code_map.IsValid()) {
-    std::ostringstream oss;
-    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
-    *error_msg = oss.str();
-    return nullptr;
-  }
-  DCHECK_EQ(code_map.Begin(), divider);
-  data_size = initial_capacity / 2;
-  code_size = initial_capacity - data_size;
-  DCHECK_EQ(code_size + data_size, initial_capacity);
-  return new JitCodeCache(
-      std::move(code_map),
-      std::move(data_map),
-      code_size,
-      data_size,
-      max_capacity,
-      garbage_collect_code,
-      memmap_flags_prot_code);
+  return jit_code_cache.release();
 }
 
-JitCodeCache::JitCodeCache(MemMap&& code_map,
-                           MemMap&& data_map,
-                           size_t initial_code_capacity,
-                           size_t initial_data_capacity,
-                           size_t max_capacity,
-                           bool garbage_collect_code,
-                           int memmap_flags_prot_code)
+JitCodeCache::JitCodeCache()
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
-      code_map_(std::move(code_map)),
-      data_map_(std::move(data_map)),
-      max_capacity_(max_capacity),
-      current_capacity_(initial_code_capacity + initial_data_capacity),
-      code_end_(initial_code_capacity),
-      data_end_(initial_data_capacity),
       last_collection_increased_code_cache_(false),
-      garbage_collect_code_(garbage_collect_code),
+      garbage_collect_code_(true),
       used_memory_for_data_(0),
       used_memory_for_code_(0),
       number_of_compilations_(0),
@@ -279,39 +423,67 @@
       histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
       is_weak_access_enabled_(true),
       inline_cache_cond_("Jit inline cache condition variable", lock_),
-      memmap_flags_prot_code_(memmap_flags_prot_code) {
+      zygote_data_pages_(),
+      zygote_exec_pages_(),
+      zygote_data_mspace_(nullptr),
+      zygote_exec_mspace_(nullptr) {
+}
 
-  DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
-  data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
+void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
+  CHECK_GE(max_capacity, initial_capacity);
+  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
+  // Align both capacities to page size, as that's the unit mspaces use.
+  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
 
-  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
-    PLOG(FATAL) << "create_mspace_with_base failed";
+  used_memory_for_data_ = 0;
+  used_memory_for_code_ = 0;
+  number_of_compilations_ = 0;
+  number_of_osr_compilations_ = 0;
+  number_of_collections_ = 0;
+
+  data_pages_ = MemMap();
+  exec_pages_ = MemMap();
+  non_exec_pages_ = MemMap();
+  initial_capacity_ = initial_capacity;
+  max_capacity_ = max_capacity;
+  current_capacity_ = initial_capacity,
+  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
+  exec_end_ = initial_capacity - data_end_;
+}
+
+void JitCodeCache::InitializeSpaces() {
+  // Initialize the data heap
+  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
+  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
+
+  // Initialize the code heap
+  MemMap* code_heap = nullptr;
+  if (non_exec_pages_.IsValid()) {
+    code_heap = &non_exec_pages_;
+  } else if (exec_pages_.IsValid()) {
+    code_heap = &exec_pages_;
   }
-
-  SetFootprintLimit(current_capacity_);
-
-  CheckedCall(mprotect,
-              "mprotect jit code cache",
-              code_map_.Begin(),
-              code_map_.Size(),
-              memmap_flags_prot_code_);
-  CheckedCall(mprotect,
-              "mprotect jit data cache",
-              data_map_.Begin(),
-              data_map_.Size(),
-              kProtData);
-
-  VLOG(jit) << "Created jit code cache: initial data size="
-            << PrettySize(initial_data_capacity)
-            << ", initial code size="
-            << PrettySize(initial_code_capacity);
+  if (code_heap != nullptr) {
+    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
+    // heap, will take and initialize pages in create_mspace_with_base().
+    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
+    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
+    SetFootprintLimit(initial_capacity_);
+    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
+    // perform the update and there are no other times write access is required.
+    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
+  } else {
+    exec_mspace_ = nullptr;
+    SetFootprintLimit(initial_capacity_);
+  }
 }
 
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return code_map_.Begin() <= ptr && ptr < code_map_.End();
+  return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -379,22 +551,20 @@
       : ScopedTrace("ScopedCodeCacheWrite"),
         code_cache_(code_cache) {
     ScopedTrace trace("mprotect all");
-    CheckedCall(
-        mprotect,
-        "make code writable",
-        code_cache_->code_map_.Begin(),
-        code_cache_->code_map_.Size(),
-        code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
+    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
+      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
   }
 
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CheckedCall(
-        mprotect,
-        "make code protected",
-        code_cache_->code_map_.Begin(),
-        code_cache_->code_map_.Size(),
-        code_cache_->memmap_flags_prot_code_);
+    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
+      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
   }
 
  private:
@@ -411,7 +581,7 @@
                                   size_t code_size,
                                   size_t data_size,
                                   bool osr,
-                                  Handle<mirror::ObjectArray<mirror::Object>> roots,
+                                  const std::vector<Handle<mirror::Object>>& roots,
                                   bool has_should_deoptimize_flag,
                                   const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   uint8_t* result = CommitCodeInternal(self,
@@ -477,18 +647,16 @@
   return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
 }
 
-static void DCheckRootsAreValid(Handle<mirror::ObjectArray<mirror::Object>> roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
   }
-  const uint32_t length = roots->GetLength();
   // Put all roots in `roots_data`.
-  for (uint32_t i = 0; i < length; ++i) {
-    ObjPtr<mirror::Object> object = roots->Get(i);
+  for (Handle<mirror::Object> object : roots) {
     // Ensure the string is strongly interned. b/32995596
     if (object->IsString()) {
-      ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
+      ObjPtr<mirror::String> str = object->AsString();
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
@@ -496,12 +664,12 @@
 }
 
 void JitCodeCache::FillRootTable(uint8_t* roots_data,
-                                 Handle<mirror::ObjectArray<mirror::Object>> roots) {
+                                 const std::vector<Handle<mirror::Object>>& roots) {
   GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
-  const uint32_t length = roots->GetLength();
+  const uint32_t length = roots.size();
   // Put all roots in `roots_data`.
   for (uint32_t i = 0; i < length; ++i) {
-    ObjPtr<mirror::Object> object = roots->Get(i);
+    ObjPtr<mirror::Object> object = roots[i].Get();
     gc_roots[i] = GcRoot<mirror::Object>(object);
   }
 }
@@ -530,7 +698,7 @@
   // This does not need a read barrier because this is called by GC.
   mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
   if (cls != nullptr && cls != weak_sentinel) {
-    DCHECK((cls->IsClass<kDefaultVerifyFlags, kWithoutReadBarrier>()));
+    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
     // Look at the classloader of the class to know if it has been unloaded.
     // This does not need a read barrier because this is called by GC.
     mirror::Object* class_loader =
@@ -590,15 +758,24 @@
 }
 
 void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+  if (IsInZygoteExecSpace(code_ptr)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  RemoveNativeDebugInfoForJit(code_ptr);
+  RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
     FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
-  FreeCode(reinterpret_cast<uint8_t*>(allocation));
+
+  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
+  if (HasDualCodeMapping()) {
+    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
+  }
+
+  FreeCode(code_allocation);
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
@@ -725,7 +902,8 @@
   }
 }
 
-static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
+static void ClearMethodCounter(ArtMethod* method, bool was_warm)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (was_warm) {
     method->SetPreviouslyWarm();
   }
@@ -749,6 +927,16 @@
   }
 }
 
+const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
+  if (HasDualCodeMapping()) {
+    return &non_exec_pages_;
+  } else if (HasCodeMapping()) {
+    return &exec_pages_;
+  } else {
+    return nullptr;
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -757,7 +945,7 @@
                                           size_t code_size,
                                           size_t data_size,
                                           bool osr,
-                                          Handle<mirror::ObjectArray<mirror::Object>> roots,
+                                          const std::vector<Handle<mirror::Object>>& roots,
                                           bool has_should_deoptimize_flag,
                                           const ArenaSet<ArtMethod*>&
                                               cha_single_implementation_list) {
@@ -769,38 +957,73 @@
     DCheckRootsAreValid(roots);
   }
 
-  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
-  // Ensure the header ends up at expected instruction alignment.
-  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-  size_t total_size = header_size + code_size;
-
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
-  uint8_t* memory = nullptr;
+
   MutexLock mu(self, lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
   {
     ScopedCodeCacheWrite scc(this);
-    memory = AllocateCode(total_size);
-    if (memory == nullptr) {
+
+    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+    // Ensure the header ends up at expected instruction alignment.
+    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+    size_t total_size = header_size + code_size;
+
+    // AllocateCode allocates memory in non-executable region for alignment header and code. The
+    // header size may include alignment padding.
+    uint8_t* nox_memory = AllocateCode(total_size);
+    if (nox_memory == nullptr) {
       return nullptr;
     }
-    code_ptr = memory + header_size;
 
+    // code_ptr points to non-executable code.
+    code_ptr = nox_memory + header_size;
     std::copy(code, code + code_size, code_ptr);
     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+
+    // From here code_ptr points to executable code.
+    if (HasDualCodeMapping()) {
+      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
+    }
+
     new (method_header) OatQuickMethodHeader(
         (stack_map != nullptr) ? code_ptr - stack_map : 0u,
         code_size);
-    // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
-    // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
-    // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
-    // 6P) stop being supported or their kernels are fixed.
+
+    DCHECK(!Runtime::Current()->IsAotCompiler());
+    if (has_should_deoptimize_flag) {
+      method_header->SetHasShouldDeoptimizeFlag();
+    }
+
+    // Update method_header pointer to executable code region.
+    if (HasDualCodeMapping()) {
+      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
+    }
+
+    // Both instruction and data caches need flushing to the point of unification where both share
+    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
+    // newly added code are written out to the point of unification. Flushing the instruction
+    // cache ensures the newly written code will be fetched from the point of unification before
+    // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
+    // prevent stale code from residing in the instruction cache.
+    //
+    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
+    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
+    // operation. This is a kernel bug that we need to work around until affected devices
+    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
     //
     // For reference, this behavior is caused by this commit:
     // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+    //
+    if (HasDualCodeMapping()) {
+      // Flush the data cache lines associated with the non-executable copy of the code just added.
+      FlushDataCache(nox_memory, nox_memory + total_size);
+    }
+    // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
+    // flushed is for the executable mapping of the code just added.
     FlushInstructionCache(code_ptr, code_ptr + code_size);
 
     // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
@@ -809,17 +1032,14 @@
     // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
     // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
     // hardware support that broadcasts TLB invalidations and so their kernels have no software
-    // based TLB shootdown. FlushInstructionPipeline() is a wrapper around the Linux
-    // membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED) syscall which does the appropriate flushing.
-    FlushInstructionPipeline();
-
-    DCHECK(!Runtime::Current()->IsAotCompiler());
-    if (has_should_deoptimize_flag) {
-      method_header->SetHasShouldDeoptimizeFlag();
-    }
+    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
+    // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
+    // platforms lacking the appropriate support.
+    art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
 
     number_of_compilations_++;
   }
+
   // We need to update the entry point in the runnable state for the instrumentation.
   {
     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -833,7 +1053,7 @@
         // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
         // Hopefully the class hierarchy will be more stable when compilation is retried.
         single_impl_still_valid = false;
-        ClearMethodCounter(method, /*was_warm*/ false);
+        ClearMethodCounter(method, /*was_warm=*/ false);
         break;
       }
     }
@@ -919,7 +1139,7 @@
     return false;
   }
 
-  method->ClearCounter();
+  method->SetCounter(0);
   Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
       method, GetQuickToInterpreterBridge());
   VLOG(jit)
@@ -981,7 +1201,7 @@
 // method. The compiled code for the method (if there is any) must not be in any threads call stack.
 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
   MutexLock mu(Thread::Current(), lock_);
-  RemoveMethodLocked(method, /* release_memory */ true);
+  RemoveMethodLocked(method, /* release_memory= */ true);
 }
 
 // This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -1027,6 +1247,24 @@
   }
 }
 
+void JitCodeCache::ClearEntryPointsInZygoteExecSpace() {
+  MutexLock mu(Thread::Current(), lock_);
+  // Iterate over profiling infos to know which methods may have been JITted. Note that
+  // to be JITted, a method must have a profiling info.
+  for (ProfilingInfo* info : profiling_infos_) {
+    ArtMethod* method = info->GetMethod();
+    if (IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode())) {
+      method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+    }
+    // If zygote does method tracing, or in some configuration where
+    // the JIT zygote does GC, we also need to clear the saved entry point
+    // in the profiling info.
+    if (IsInZygoteExecSpace(info->GetSavedEntryPoint())) {
+      info->SetSavedEntryPoint(nullptr);
+    }
+  }
+}
+
 size_t JitCodeCache::CodeCacheSizeLocked() {
   return used_memory_for_code_;
 }
@@ -1094,41 +1332,32 @@
   }
 }
 
-class MarkCodeVisitor final : public StackVisitor {
- public:
-  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
-      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
-        code_cache_(code_cache_in),
-        bitmap_(code_cache_->GetLiveBitmap()) {}
-
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-    if (method_header == nullptr) {
-      return true;
-    }
-    const void* code = method_header->GetCode();
-    if (code_cache_->ContainsPc(code)) {
-      // Use the atomic set version, as multiple threads are executing this code.
-      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
-    }
-    return true;
-  }
-
- private:
-  JitCodeCache* const code_cache_;
-  CodeCacheBitmap* const bitmap_;
-};
-
 class MarkCodeClosure final : public Closure {
  public:
-  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
-      : code_cache_(code_cache), barrier_(barrier) {}
+  MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
+      : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}
 
   void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedTrace trace(__PRETTY_FUNCTION__);
     DCHECK(thread == Thread::Current() || thread->IsSuspended());
-    MarkCodeVisitor visitor(thread, code_cache_);
-    visitor.WalkStack();
+    StackVisitor::WalkStack(
+        [&](const art::StackVisitor* stack_visitor) {
+          const OatQuickMethodHeader* method_header =
+              stack_visitor->GetCurrentOatQuickMethodHeader();
+          if (method_header == nullptr) {
+            return true;
+          }
+          const void* code = method_header->GetCode();
+          if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
+            // Use the atomic set version, as multiple threads are executing this code.
+            bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+          }
+          return true;
+        },
+        thread,
+        /* context= */ nullptr,
+        art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
+
     if (kIsDebugBuild) {
       // The stack walking code queries the side instrumentation stack if it
       // sees an instrumentation exit pc, so the JIT code of methods in that stack
@@ -1139,10 +1368,10 @@
         // its stack frame, it is not the method owning return_pc_. We just pass null to
         // LookupMethodHeader: the method is only checked against in debug builds.
         OatQuickMethodHeader* method_header =
-            code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
+            code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
         if (method_header != nullptr) {
           const void* code = method_header->GetCode();
-          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
+          CHECK(bitmap_->Test(FromCodeToAllocation(code)));
         }
       }
     }
@@ -1151,6 +1380,7 @@
 
  private:
   JitCodeCache* const code_cache_;
+  CodeCacheBitmap* const bitmap_;
   Barrier* const barrier_;
 };
 
@@ -1160,13 +1390,13 @@
 }
 
 void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
-  size_t per_space_footprint = new_footprint / 2;
-  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
-  DCHECK_EQ(per_space_footprint * 2, new_footprint);
-  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
-  {
+  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
+  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
+  mspace_set_footprint_limit(data_mspace_, data_space_footprint);
+  if (HasCodeMapping()) {
     ScopedCodeCacheWrite scc(this);
-    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
+    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
   }
 }
 
@@ -1196,7 +1426,7 @@
 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
   Barrier barrier(0);
   size_t threads_running_checkpoint = 0;
-  MarkCodeClosure closure(this, &barrier);
+  MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
   threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
   // Now that we have run our checkpoint, move to a suspended state and wait
   // for other threads to run the checkpoint.
@@ -1225,24 +1455,21 @@
 
 void JitCodeCache::GarbageCollectCache(Thread* self) {
   ScopedTrace trace(__FUNCTION__);
-  if (!garbage_collect_code_) {
-    MutexLock mu(self, lock_);
-    IncreaseCodeCacheCapacity();
-    return;
-  }
-
   // Wait for an existing collection, or let everyone know we are starting one.
   {
     ScopedThreadSuspension sts(self, kSuspended);
     MutexLock mu(self, lock_);
-    if (WaitForPotentialCollectionToComplete(self)) {
+    if (!garbage_collect_code_) {
+      IncreaseCodeCacheCapacity();
+      return;
+    } else if (WaitForPotentialCollectionToComplete(self)) {
       return;
     } else {
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(code_map_.Begin()),
-          reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
+          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1263,7 +1490,7 @@
               << PrettySize(CodeCacheSize())
               << ", data=" << PrettySize(DataCacheSize());
 
-    DoCollection(self, /* collect_profiling_info */ do_full_collection);
+    DoCollection(self, /* collect_profiling_info= */ do_full_collection);
 
     VLOG(jit) << "After code cache collection, code="
               << PrettySize(CodeCacheSize())
@@ -1290,7 +1517,7 @@
         // interpreter will update its entry point to the compiled code and call it.
         for (ProfilingInfo* info : profiling_infos_) {
           const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (ContainsPc(entry_point)) {
+          if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
             info->SetSavedEntryPoint(entry_point);
             // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
             // class of the method. We may be concurrently running a GC which makes accessing
@@ -1305,7 +1532,7 @@
         // Change entry points of native methods back to the GenericJNI entrypoint.
         for (const auto& entry : jni_stubs_map_) {
           const JniStubData& data = entry.second;
-          if (!data.IsCompiled()) {
+          if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
             continue;
           }
           // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
@@ -1337,7 +1564,9 @@
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
-      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+      if (IsInZygoteExecSpace(data->GetCode()) ||
+          !data->IsCompiled() ||
+          GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
         ++it;
       } else {
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
@@ -1347,7 +1576,7 @@
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      if (GetLiveBitmap()->Test(allocation)) {
+      if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
@@ -1359,6 +1588,29 @@
   FreeAllMethodHeaders(method_headers);
 }
 
+bool JitCodeCache::GetGarbageCollectCode() {
+  MutexLock mu(Thread::Current(), lock_);
+  return garbage_collect_code_;
+}
+
+void JitCodeCache::SetGarbageCollectCode(bool value) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, lock_);
+  if (garbage_collect_code_ != value) {
+    if (garbage_collect_code_) {
+      // When dynamically disabling the garbage collection, we need
+      // to make sure that a potential current collection is finished, and also
+      // clear the saved entry point in profiling infos to avoid dangling pointers.
+      WaitForPotentialCollectionToComplete(self);
+      for (ProfilingInfo* info : profiling_infos_) {
+        info->SetSavedEntryPoint(nullptr);
+      }
+    }
+    // Update the flag while holding the lock to ensure no thread will try to GC.
+    garbage_collect_code_ = value;
+  }
+}
+
 void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
   ScopedTrace trace(__FUNCTION__);
   {
@@ -1368,7 +1620,7 @@
       // Also remove the saved entry point from the ProfilingInfo objects.
       for (ProfilingInfo* info : profiling_infos_) {
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
+        if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
           info->GetMethod()->SetProfilingInfo(nullptr);
         }
 
@@ -1376,7 +1628,7 @@
           info->SetSavedEntryPoint(nullptr);
           // We are going to move this method back to interpreter. Clear the counter now to
           // give it a chance to be hot again.
-          ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
+          ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
         }
       }
     } else if (kIsDebugBuild) {
@@ -1393,6 +1645,9 @@
     for (const auto& entry : jni_stubs_map_) {
       const JniStubData& data = entry.second;
       const void* code_ptr = data.GetCode();
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       for (ArtMethod* method : data.GetMethods()) {
         if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -1404,6 +1659,9 @@
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1611,22 +1869,26 @@
 // NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
 // is already held.
 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
-  if (code_mspace_ == mspace) {
-    size_t result = code_end_;
-    code_end_ += increment;
-    return reinterpret_cast<void*>(result + code_map_.Begin());
+  if (mspace == exec_mspace_) {
+    DCHECK(exec_mspace_ != nullptr);
+    const MemMap* const code_pages = GetUpdatableCodeMapping();
+    void* result = code_pages->Begin() + exec_end_;
+    exec_end_ += increment;
+    return result;
   } else {
     DCHECK_EQ(data_mspace_, mspace);
-    size_t result = data_end_;
+    void* result = data_pages_.Begin() + data_end_;
     data_end_ += increment;
-    return reinterpret_cast<void*>(result + data_map_.Begin());
+    return result;
   }
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                       std::vector<ProfileMethodInfo>& methods) {
+  Thread* self = Thread::Current();
+  WaitUntilInlineCacheAccessible(self);
+  MutexLock mu(self, lock_);
   ScopedTrace trace(__FUNCTION__);
-  MutexLock mu(Thread::Current(), lock_);
   uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
   for (const ProfilingInfo* info : profiling_infos_) {
     ArtMethod* method = info->GetMethod();
@@ -1746,6 +2008,7 @@
         instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
       }
       if (collection_in_progress_) {
+        CHECK(!IsInZygoteExecSpace(data->GetCode()));
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
       }
     }
@@ -1756,7 +2019,7 @@
       VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
       // Because the counter is not atomic, there are some rare cases where we may not hit the
       // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
-      ClearMethodCounter(method, /*was_warm*/ false);
+      ClearMethodCounter(method, /*was_warm=*/ false);
       return false;
     }
 
@@ -1807,11 +2070,6 @@
   }
 }
 
-size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
-  MutexLock mu(Thread::Current(), lock_);
-  return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
-}
-
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                              const OatQuickMethodHeader* header) {
   DCHECK(!method->IsNative());
@@ -1832,7 +2090,7 @@
     // and clear the counter to get the method Jitted again.
     Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
         method, GetQuickToInterpreterBridge());
-    ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
+    ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
   } else {
     MutexLock mu(Thread::Current(), lock_);
     auto it = osr_code_map_.find(method);
@@ -1846,7 +2104,7 @@
 uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   uint8_t* result = reinterpret_cast<uint8_t*>(
-      mspace_memalign(code_mspace_, alignment, code_size));
+      mspace_memalign(exec_mspace_, alignment, code_size));
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
   // Ensure the header ends up at expected instruction alignment.
   DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
@@ -1855,8 +2113,12 @@
 }
 
 void JitCodeCache::FreeCode(uint8_t* code) {
+  if (IsInZygoteExecSpace(code)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   used_memory_for_code_ -= mspace_usable_size(code);
-  mspace_free(code_mspace_, code);
+  mspace_free(exec_mspace_, code);
 }
 
 uint8_t* JitCodeCache::AllocateData(size_t data_size) {
@@ -1866,16 +2128,19 @@
 }
 
 void JitCodeCache::FreeData(uint8_t* data) {
+  if (IsInZygoteDataSpace(data)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   used_memory_for_data_ -= mspace_usable_size(data);
   mspace_free(data_mspace_, data);
 }
 
 void JitCodeCache::Dump(std::ostream& os) {
   MutexLock mu(Thread::Current(), lock_);
-  MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
   os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
      << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
-     << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
+     << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
      << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
@@ -1888,5 +2153,31 @@
   histogram_profiling_info_memory_use_.PrintMemoryUse(os);
 }
 
+void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
+  MutexLock mu(Thread::Current(), lock_);
+
+  zygote_data_pages_ = std::move(data_pages_);
+  zygote_exec_pages_ = std::move(exec_pages_);
+  zygote_data_mspace_ = data_mspace_;
+  zygote_exec_mspace_ = exec_mspace_;
+
+  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+
+  InitializeState(initial_capacity, max_capacity);
+
+  std::string error_msg;
+  if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
+    LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
+    return;
+  }
+
+  InitializeSpaces();
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a4a0f8f..8a6cebe 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,7 +71,7 @@
 
 namespace jit {
 
-class JitInstrumentationCache;
+class MarkCodeClosure;
 class ScopedCodeCacheWrite;
 
 // Alignment in bits that will suit all architectures.
@@ -90,19 +90,12 @@
 
   // Create the code cache with a code + data capacity equal to "capacity", error message is passed
   // in the out arg error_msg.
-  static JitCodeCache* Create(size_t initial_capacity,
-                              size_t max_capacity,
-                              bool generate_debug_info,
-                              bool used_only_for_profile_data,
+  static JitCodeCache* Create(bool used_only_for_profile_data,
+                              bool rwx_memory_allowed,
+                              bool is_zygote,
                               std::string* error_msg);
   ~JitCodeCache();
 
-  // Number of bytes allocated in the code cache.
-  size_t CodeCacheSize() REQUIRES(!lock_);
-
-  // Number of bytes allocated in the data cache.
-  size_t DataCacheSize() REQUIRES(!lock_);
-
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
@@ -141,7 +134,7 @@
                       size_t code_size,
                       size_t data_size,
                       bool osr,
-                      Handle<mirror::ObjectArray<mirror::Object>> roots,
+                      const std::vector<Handle<mirror::Object>>& roots,
                       bool has_should_deoptimize_flag,
                       const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -177,10 +170,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
-  CodeCacheBitmap* GetLiveBitmap() const {
-    return live_bitmap_.get();
-  }
-
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
       REQUIRES(!lock_)
@@ -223,7 +212,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return mspace == code_mspace_ || mspace == data_mspace_;
+    return mspace == data_mspace_ || mspace == exec_mspace_;
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -234,10 +223,6 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint64_t GetLastUpdateTimeNs() const;
-
-  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);
-
   void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -261,13 +246,13 @@
   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
       REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
 
-  // Dynamically change whether we want to garbage collect code. Should only be used
-  // by tests.
-  void SetGarbageCollectCode(bool value) {
-    garbage_collect_code_ = value;
-  }
+  // Dynamically change whether we want to garbage collect code.
+  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);
 
-  bool GetGarbageCollectCode() const {
+  bool GetGarbageCollectCode() REQUIRES(!lock_);
+
+  // Unsafe variant for debug checks.
+  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
     return garbage_collect_code_;
   }
 
@@ -277,15 +262,22 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void PostForkChildAction(bool is_system_server, bool is_zygote);
+
+  // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
+  // This is used for removing non-debuggable JIT code at the point we realize the runtime
+  // is debuggable.
+  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+
  private:
-  // Take ownership of maps.
-  JitCodeCache(MemMap&& code_map,
-               MemMap&& data_map,
-               size_t initial_code_capacity,
-               size_t initial_data_capacity,
-               size_t max_capacity,
-               bool garbage_collect_code,
-               int memmap_flags_prot_code);
+  JitCodeCache();
+
+  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
+
+  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
+      REQUIRES(lock_);
+
+  void InitializeSpaces() REQUIRES(lock_);
 
   // Internal version of 'CommitCode' that will not retry if the
   // allocation fails. Return null if the allocation fails.
@@ -297,14 +289,14 @@
                               size_t code_size,
                               size_t data_size,
                               bool osr,
-                              Handle<mirror::ObjectArray<mirror::Object>> roots,
+                              const std::vector<Handle<mirror::Object>>& roots,
                               bool has_should_deoptimize_flag,
                               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Adds the given roots to the roots_data. Only a member for annotalysis.
-  void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
+  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
       REQUIRES(lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -340,6 +332,12 @@
   void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
 
   // Number of bytes allocated in the code cache.
+  size_t CodeCacheSize() REQUIRES(!lock_);
+
+  // Number of bytes allocated in the data cache.
+  size_t DataCacheSize() REQUIRES(!lock_);
+
+  // Number of bytes allocated in the code cache.
   size_t CodeCacheSizeLocked() REQUIRES(lock_);
 
   // Number of bytes allocated in the data cache.
@@ -376,11 +374,33 @@
       REQUIRES(lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  CodeCacheBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
+
   uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
   void FreeCode(uint8_t* code) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
   void FreeData(uint8_t* data) REQUIRES(lock_);
 
+  bool HasDualCodeMapping() const {
+    return non_exec_pages_.IsValid();
+  }
+
+  bool HasCodeMapping() const {
+    return exec_pages_.IsValid();
+  }
+
+  const MemMap* GetUpdatableCodeMapping() const;
+
+  bool IsInZygoteDataSpace(const void* ptr) const {
+    return zygote_data_pages_.HasAddress(ptr);
+  }
+
+  bool IsInZygoteExecSpace(const void* ptr) const {
+    return zygote_exec_pages_.HasAddress(ptr);
+  }
+
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
       REQUIRES(!lock_)
@@ -395,14 +415,17 @@
   ConditionVariable lock_cond_ GUARDED_BY(lock_);
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds code.
-  MemMap code_map_;
   // Mem map which holds data (stack maps and profiling info).
-  MemMap data_map_;
-  // The opaque mspace for allocating code.
-  void* code_mspace_ GUARDED_BY(lock_);
+  MemMap data_pages_;
+  // Mem map which holds code and has executable permission.
+  MemMap exec_pages_;
+  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
+  // this is the non-executable view of code used to write updates.
+  MemMap non_exec_pages_;
   // The opaque mspace for allocating data.
   void* data_mspace_ GUARDED_BY(lock_);
+  // The opaque mspace for allocating code.
+  void* exec_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
   // Holds compiled code associated with the shorty for a JNI stub.
@@ -414,23 +437,26 @@
   // ProfilingInfo objects we have allocated.
   std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
 
+  // The initial capacity in bytes this code cache starts with.
+  size_t initial_capacity_ GUARDED_BY(lock_);
+
   // The maximum capacity in bytes this code cache can go to.
   size_t max_capacity_ GUARDED_BY(lock_);
 
   // The current capacity in bytes of the code cache.
   size_t current_capacity_ GUARDED_BY(lock_);
 
-  // The current footprint in bytes of the code portion of the code cache.
-  size_t code_end_ GUARDED_BY(lock_);
-
   // The current footprint in bytes of the data portion of the code cache.
   size_t data_end_ GUARDED_BY(lock_);
 
+  // The current footprint in bytes of the code portion of the code cache.
+  size_t exec_end_ GUARDED_BY(lock_);
+
   // Whether the last collection round increased the code cache.
   bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
 
   // Whether we can do garbage collection. Not 'const' as tests may override this.
-  bool garbage_collect_code_;
+  bool garbage_collect_code_ GUARDED_BY(lock_);
 
   // The size in bytes of used memory for the data portion of the code cache.
   size_t used_memory_for_data_ GUARDED_BY(lock_);
@@ -464,13 +490,20 @@
   // Condition to wait on for accessing inline caches.
   ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
 
-  // Mapping flags for the code section.
-  const int memmap_flags_prot_code_;
+  // Mem map which holds zygote data (stack maps and profiling info).
+  MemMap zygote_data_pages_;
+  // Mem map which holds zygote code and has executable permission.
+  MemMap zygote_exec_pages_;
+  // The opaque mspace for allocating zygote data.
+  void* zygote_data_mspace_ GUARDED_BY(lock_);
+  // The opaque mspace for allocating zygote code.
+  void* zygote_exec_mspace_ GUARDED_BY(lock_);
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
+  friend class MarkCodeClosure;
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
 };
 
 }  // namespace jit
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index d9ef922..f908c62 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -129,7 +129,7 @@
     }
     total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
   }
-  FetchAndCacheResolvedClassesAndMethods(/*startup*/ true);
+  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
 
 
   // When we save without waiting for JIT notifications we use a simple
@@ -183,7 +183,7 @@
 
     uint16_t number_of_new_methods = 0;
     uint64_t start_work = NanoTime();
-    bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
+    bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods);
     // Update the notification counter based on result. Note that there might be contention on this
     // but we don't care about to be 100% precise.
     if (!profile_saved_to_disk) {
@@ -274,7 +274,7 @@
       : profile_boot_class_path_(profile_boot_class_path),
         out_(out) {}
 
-  virtual bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (klass->IsProxyClass() ||
         klass->IsArrayClass() ||
         klass->IsPrimitive() ||
@@ -362,7 +362,7 @@
       }
       // Visit all of the methods in the class to see which ones were executed.
       for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
-        if (!method.IsNative()) {
+        if (!method.IsNative() && !method.IsAbstract()) {
           DCHECK(!method.IsProxyMethod());
           const uint16_t counter = method.GetCounter();
           // Mark startup methods as hot if they have more than hot_method_sample_threshold
@@ -431,11 +431,17 @@
     ProfileCompilationInfo* cached_info = info_it->second;
 
     const std::set<std::string>& locations = it.second;
+    VLOG(profiler) << "Locations for " << it.first << " " << android::base::Join(locations, ':');
+
     for (const auto& pair : hot_methods.GetMap()) {
       const DexFile* const dex_file = pair.first;
       const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+      const MethodReferenceCollection::IndexVector& indices = pair.second;
+      VLOG(profiler) << "Location " << dex_file->GetLocation()
+                     << " base_location=" << base_location
+                     << " found=" << (locations.find(base_location) != locations.end())
+                     << " indices size=" << indices.size();
       if (locations.find(base_location) != locations.end()) {
-        const MethodReferenceCollection::IndexVector& indices = pair.second;
         uint8_t flags = Hotness::kFlagHot;
         flags |= startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup;
         cached_info->AddMethodsForDex(
@@ -448,8 +454,11 @@
     for (const auto& pair : sampled_methods.GetMap()) {
       const DexFile* const dex_file = pair.first;
       const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+      const MethodReferenceCollection::IndexVector& indices = pair.second;
+      VLOG(profiler) << "Location " << base_location
+                     << " found=" << (locations.find(base_location) != locations.end())
+                     << " indices size=" << indices.size();
       if (locations.find(base_location) != locations.end()) {
-        const MethodReferenceCollection::IndexVector& indices = pair.second;
         cached_info->AddMethodsForDex(startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup,
                                       dex_file,
                                       indices.begin(),
@@ -466,8 +475,7 @@
                        << " (" << dex_file->GetLocation() << ")";
         cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end());
       } else {
-        VLOG(profiler) << "Location not found " << base_location
-                       << " (" << dex_file->GetLocation() << ")";
+        VLOG(profiler) << "Location not found " << base_location;
       }
     }
     total_number_of_profile_entries_cached += resolved_classes_for_location.size();
@@ -501,7 +509,7 @@
 
   // We only need to do this once, not once per dex location.
   // TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms.
-  FetchAndCacheResolvedClassesAndMethods(/*startup*/ false);
+  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);
 
   for (const auto& it : tracked_locations) {
     if (!force_save && ShuttingDown(Thread::Current())) {
@@ -513,6 +521,9 @@
     }
     const std::string& filename = it.first;
     const std::set<std::string>& locations = it.second;
+    VLOG(profiler) << "Tracked filename " << filename << " locations "
+                   << android::base::Join(locations, ":");
+
     std::vector<ProfileMethodInfo> profile_methods;
     {
       ScopedObjectAccess soa(Thread::Current());
@@ -521,12 +532,15 @@
     }
     {
       ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
-      if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+      if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
         LOG(WARNING) << "Could not forcefully load profile " << filename;
         continue;
       }
       uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
       uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
+      VLOG(profiler) << "last_save_number_of_methods=" << last_save_number_of_methods
+                     << " last_save_number_of_classes=" << last_save_number_of_classes
+                     << " number of profiled methods=" << profile_methods.size();
 
       // Try to add the method data. Note this may fail is the profile loaded from disk contains
       // outdated data (e.g. the previous profiled dex files might have been updated).
@@ -546,6 +560,11 @@
           info.ClearData();
           force_save = true;
         }
+      } else if (VLOG_IS_ON(profiler)) {
+        LOG(INFO) << "Failed to find cached profile for " << filename;
+        for (auto&& pair : profile_cache_) {
+          LOG(INFO) << "Cached profile " << pair.first;
+        }
       }
 
       int64_t delta_number_of_methods =
@@ -607,9 +626,9 @@
   Runtime* runtime = Runtime::Current();
 
   bool attached = runtime->AttachCurrentThread("Profile Saver",
-                                               /*as_daemon*/true,
+                                               /*as_daemon=*/true,
                                                runtime->GetSystemThreadGroup(),
-                                               /*create_peer*/true);
+                                               /*create_peer=*/true);
   if (!attached) {
     CHECK(runtime->IsShuttingDown(Thread::Current()));
     return nullptr;
@@ -662,6 +681,7 @@
   std::vector<std::string> code_paths_to_profile;
   for (const std::string& location : code_paths) {
     if (ShouldProfileLocation(location, options.GetProfileAOTCode()))  {
+      VLOG(profiler) << "Code path to profile " << location;
       code_paths_to_profile.push_back(location);
     }
   }
@@ -751,7 +771,7 @@
 
   // Force save everything before destroying the thread since we want profiler_pthread_ to remain
   // valid.
-  instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+  instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
 
   // Wait for the saver thread to stop.
   CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -780,11 +800,38 @@
 static void AddTrackedLocationsToMap(const std::string& output_filename,
                                      const std::vector<std::string>& code_paths,
                                      SafeMap<std::string, std::set<std::string>>* map) {
+  std::vector<std::string> code_paths_and_filenames;
+  // The dex locations are sometimes set to the filename instead of the full path.
+  // So make sure we have both "locations" when tracking what needs to be profiled.
+  //   - apps + system server have filenames
+  //   - boot classpath elements have full paths
+
+  // TODO(calin, ngeoffray, vmarko) This is a workaround for using filenames as
+  // dex locations - needed to prebuild with a partial boot image
+  // (commit: c4a924d8c74241057d957d360bf31cd5cd0e4f9c).
+  // We should find a better way which allows us to do the tracking based on full paths.
+  for (const std::string& path : code_paths) {
+    size_t last_sep_index = path.find_last_of('/');
+    if (last_sep_index == path.size() - 1) {
+      // Should not happen, but anyone can register code paths so better be prepared and ignore
+      // such locations.
+      continue;
+    }
+    std::string filename = last_sep_index == std::string::npos
+        ? path
+        : path.substr(last_sep_index + 1);
+
+    code_paths_and_filenames.push_back(path);
+    code_paths_and_filenames.push_back(filename);
+  }
+
   auto it = map->find(output_filename);
   if (it == map->end()) {
-    map->Put(output_filename, std::set<std::string>(code_paths.begin(), code_paths.end()));
+    map->Put(
+        output_filename,
+        std::set<std::string>(code_paths_and_filenames.begin(), code_paths_and_filenames.end()));
   } else {
-    it->second.insert(code_paths.begin(), code_paths.end());
+    it->second.insert(code_paths_and_filenames.begin(), code_paths_and_filenames.end());
   }
 }
 
@@ -838,7 +885,7 @@
   // but we only use this in testing when we now this won't happen.
   // Refactor the way we handle the instance so that we don't end up in this situation.
   if (saver != nullptr) {
-    saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+    saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
   }
 }
 
@@ -846,7 +893,7 @@
   MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
   if (instance_ != nullptr) {
     ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
-    if (!info.Load(profile, /*clear_if_invalid*/false)) {
+    if (!info.Load(profile, /*clear_if_invalid=*/false)) {
       return false;
     }
     ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a3dae83..f6139bb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -125,7 +125,7 @@
   }
 
   bool IsInUseByCompiler() const {
-    return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+    return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
         (current_inline_uses_ > 0);
   }
 
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index c5e8830..7279299 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -181,7 +181,7 @@
     }
   }
 
-  VarArgs(VarArgs&& other) {
+  VarArgs(VarArgs&& other) noexcept {
     m_ = other.m_;
     cnt_ = other.cnt_;
     type_ = other.type_;
@@ -286,7 +286,7 @@
     // to get reasonable stacks and environment, rather than relying on
     // tombstoned.
     JNIEnv* env;
-    Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thread_args */ nullptr);
+    Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thr_args= */ nullptr);
 
     std::string tmp = android::base::StringPrintf(
         "a thread (tid %" PRId64 " is making JNI calls without being attached",
@@ -880,7 +880,7 @@
       break;
     case kDirectByteBuffer:
       UNIMPLEMENTED(FATAL);
-      break;
+      UNREACHABLE();
     case kString:
       okay = obj->GetClass()->IsStringClass();
       break;
@@ -2945,7 +2945,7 @@
           break;
         case Primitive::kPrimVoid:
           LOG(FATAL) << "Unexpected type: " << type;
-          break;
+          UNREACHABLE();
       }
       if (sc.Check(soa, false, result_check, &result)) {
         return result;
@@ -3031,7 +3031,7 @@
           break;
         case Primitive::kPrimVoid:
           LOG(FATAL) << "Unexpected type: " << type;
-          break;
+          UNREACHABLE();
       }
       JniValueType result;
       result.V = nullptr;
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index fdf0fee..e54b807 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -23,6 +23,7 @@
 #include "art_method-inl.h"
 #include "base/dumpable.h"
 #include "base/mutex-inl.h"
+#include "base/sdk_version.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "check_jni.h"
@@ -86,7 +87,11 @@
       self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
     }
 
-    android::CloseNativeLibrary(handle_, needs_native_bridge_);
+    char* error_msg = nullptr;
+    if (!android::CloseNativeLibrary(handle_, needs_native_bridge_, &error_msg)) {
+      LOG(WARNING) << "Error while unloading native library \"" << path_ << "\": " << error_msg;
+      android::NativeLoaderFreeErrorMessage(error_msg);
+    }
   }
 
   jweak GetClassLoader() const {
@@ -330,7 +335,7 @@
     }
     ScopedThreadSuspension sts(self, kNative);
     // Do this without holding the jni libraries lock to prevent possible deadlocks.
-    typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
+    using JNI_OnUnloadFn = void(*)(JavaVM*, void*);
     for (auto library : unload_libraries) {
       void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
       if (sym == nullptr) {
@@ -531,8 +536,6 @@
   if (current_method != nullptr) {
     os << "\n    from " << current_method->PrettyMethod();
   }
-  os << "\n";
-  self->Dump(os);
 
   if (check_jni_abort_hook_ != nullptr) {
     check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
@@ -854,6 +857,7 @@
 bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
                                   const std::string& path,
                                   jobject class_loader,
+                                  jclass caller_class,
                                   std::string* error_msg) {
   error_msg->clear();
 
@@ -869,6 +873,7 @@
     library = libraries_->Get(path);
   }
   void* class_loader_allocator = nullptr;
+  std::string caller_location;
   {
     ScopedObjectAccess soa(env);
     // As the incoming class loader is reachable/alive during the call of this function,
@@ -879,6 +884,13 @@
     if (class_linker->IsBootClassLoader(soa, loader.Ptr())) {
       loader = nullptr;
       class_loader = nullptr;
+      if (caller_class != nullptr) {
+        ObjPtr<mirror::Class> caller = soa.Decode<mirror::Class>(caller_class);
+        ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
+        if (dex_cache != nullptr) {
+          caller_location = dex_cache->GetLocation()->ToModifiedUtf8();
+        }
+      }
     }
 
     class_loader_allocator = class_linker->GetAllocatorForClassLoader(loader.Ptr());
@@ -960,17 +972,21 @@
   Locks::mutator_lock_->AssertNotHeld(self);
   const char* path_str = path.empty() ? nullptr : path.c_str();
   bool needs_native_bridge = false;
-  void* handle = android::OpenNativeLibrary(env,
-                                            runtime_->GetTargetSdkVersion(),
-                                            path_str,
-                                            class_loader,
-                                            library_path.get(),
-                                            &needs_native_bridge,
-                                            error_msg);
-
+  char* nativeloader_error_msg = nullptr;
+  void* handle = android::OpenNativeLibrary(
+      env,
+      runtime_->GetTargetSdkVersion(),
+      path_str,
+      class_loader,
+      (caller_location.empty() ? nullptr : caller_location.c_str()),
+      library_path.get(),
+      &needs_native_bridge,
+      &nativeloader_error_msg);
   VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_NOW) returned " << handle << "]";
 
   if (handle == nullptr) {
+    *error_msg = nativeloader_error_msg;
+    android::NativeLoaderFreeErrorMessage(nativeloader_error_msg);
     VLOG(jni) << "dlopen(\"" << path << "\", RTLD_NOW) failed: " << *error_msg;
     return false;
   }
@@ -1023,11 +1039,11 @@
     self->SetClassLoaderOverride(class_loader);
 
     VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
-    typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
+    using JNI_OnLoadFn = int(*)(JavaVM*, void*);
     JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
     int version = (*jni_on_load)(this, nullptr);
 
-    if (runtime_->GetTargetSdkVersion() != 0 && runtime_->GetTargetSdkVersion() <= 21) {
+    if (IsSdkVersionSetAndAtMost(runtime_->GetTargetSdkVersion(), SdkVersion::kL)) {
       // Make sure that sigchain owns SIGSEGV.
       EnsureFrontOfChain(SIGSEGV);
     }
diff --git a/runtime/jni/java_vm_ext.h b/runtime/jni/java_vm_ext.h
index 408d354..424dd7c 100644
--- a/runtime/jni/java_vm_ext.h
+++ b/runtime/jni/java_vm_ext.h
@@ -101,6 +101,7 @@
   bool LoadNativeLibrary(JNIEnv* env,
                          const std::string& path,
                          jobject class_loader,
+                         jclass caller_class,
                          std::string* error_msg);
 
   // Unload native libraries with cleared class loaders.
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 4049c6e..4a7b1ca 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -27,7 +27,7 @@
 
 class JavaVmExtTest : public CommonRuntimeTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
 
     vm_ = Runtime::Current()->GetJavaVM();
@@ -109,6 +109,7 @@
 }
 
 TEST_F(JavaVmExtTest, AttachCurrentThread_SmallStack) {
+  TEST_DISABLED_FOR_MEMORY_TOOL();  // b/123500163
   pthread_t pthread;
   pthread_attr_t attr;
   const char* reason = __PRETTY_FUNCTION__;
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index efe43ee..976f89b 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -21,6 +21,7 @@
 
 #include "android-base/stringprintf.h"
 
+#include "base/mutex.h"
 #include "base/to_str.h"
 #include "check_jni.h"
 #include "indirect_reference_table.h"
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 3a007ad..61de074 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "indirect_reference_table.h"
 #include "obj_ptr.h"
 #include "reference_table.h"
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 5200607..af86cc0 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -45,12 +45,15 @@
 #include "java_vm_ext.h"
 #include "jni_env_ext.h"
 #include "jvalue-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/field-inl.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
 #include "nativehelper/scoped_local_ref.h"
@@ -81,20 +84,20 @@
 // things not rendering correctly. E.g. b/16858794
 static constexpr bool kWarnJniAbort = false;
 
-static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
-  return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames */ 1));
-}
-
 template<typename T>
-ALWAYS_INLINE static bool ShouldBlockAccessToMember(T* member, Thread* self)
+ALWAYS_INLINE static bool ShouldDenyAccessToMember(T* member, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  hiddenapi::Action action = hiddenapi::GetMemberAction(
-      member, self, IsCallerTrusted, hiddenapi::kJNI);
-  if (action != hiddenapi::kAllow) {
-    hiddenapi::NotifyHiddenApiListener(member);
-  }
-
-  return action == hiddenapi::kDeny;
+  return hiddenapi::ShouldDenyAccessToMember(
+      member,
+      [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+        // Construct AccessContext from the first calling class on stack.
+        // If the calling class cannot be determined, e.g. unattached threads,
+        // we conservatively assume the caller is trusted.
+        ObjPtr<mirror::Class> caller = GetCallingClass(self, /* num_frames */ 1);
+        return caller.IsNull() ? hiddenapi::AccessContext(/* is_trusted= */ true)
+                               : hiddenapi::AccessContext(caller);
+      },
+      hiddenapi::AccessMethod::kJNI);
 }
 
 // Helpers to call instrumentation functions for fields. These take jobjects so we don't need to set
@@ -106,9 +109,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -133,9 +136,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -157,9 +160,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -256,7 +259,7 @@
   } else {
     method = c->FindClassMethod(name, sig, pointer_size);
   }
-  if (method != nullptr && ShouldBlockAccessToMember(method, soa.Self())) {
+  if (method != nullptr && ShouldDenyAccessToMember(method, soa.Self())) {
     method = nullptr;
   }
   if (method == nullptr || method->IsStatic() != is_static) {
@@ -335,7 +338,7 @@
   } else {
     field = c->FindInstanceField(name, field_type->GetDescriptor(&temp));
   }
-  if (field != nullptr && ShouldBlockAccessToMember(field, soa.Self())) {
+  if (field != nullptr && ShouldDenyAccessToMember(field, soa.Self())) {
     field = nullptr;
   }
   if (field == nullptr) {
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 3040b90..57346b7 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -34,7 +34,7 @@
 // TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
 class JniInternalTest : public CommonCompilerTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonCompilerTest::SetUp();
 
     vm_ = Runtime::Current()->GetJavaVM();
@@ -962,11 +962,11 @@
   // Make sure we can actually use it.
   jstring s = env_->NewStringUTF("poop");
   if (mirror::kUseStringCompression) {
-    ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true),
+    ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible= */ true),
               env_->GetIntField(s, fid2));
     // Create incompressible string
     jstring s_16 = env_->NewStringUTF("\u0444\u0444");
-    ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false),
+    ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible= */ false),
               env_->GetIntField(s_16, fid2));
   } else {
     ASSERT_EQ(4, env_->GetIntField(s, fid2));
@@ -1485,7 +1485,7 @@
   ASSERT_NE(weak_global, nullptr);
   env_->DeleteLocalRef(local_ref);
   // GC should clear the weak global.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   jobject new_global_ref = env_->NewGlobalRef(weak_global);
   EXPECT_EQ(new_global_ref, nullptr);
   jobject new_local_ref = env_->NewLocalRef(weak_global);
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index b42d995..d03749c 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_JVALUE_H_
 #define ART_RUNTIME_JVALUE_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 #include <stdint.h>
 
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index d1c230f..3fb83ac 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -23,8 +23,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/bit_utils.h"
 
 namespace art {
@@ -95,7 +95,7 @@
     tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
   }
 
-  static size_t TaggedTopQuickFrameOffset() {
+  static constexpr size_t TaggedTopQuickFrameOffset() {
     return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
   }
 
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 570fc48..7d889c0 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -22,6 +22,7 @@
 #include "common_dex_operations.h"
 #include "interpreter/shadow_frame-inl.h"
 #include "jvalue-inl.h"
+#include "mirror/class-inl.h"
 #include "mirror/emulated_stack_frame.h"
 #include "mirror/method_handle_impl-inl.h"
 #include "mirror/method_type.h"
@@ -745,7 +746,7 @@
                                        callee_type,
                                        self,
                                        shadow_frame,
-                                       method_handle /* receiver */,
+                                       /* receiver= */ method_handle,
                                        operands,
                                        result);
   } else {
@@ -1103,7 +1104,7 @@
   if (IsInvokeVarHandle(handle_kind)) {
     return DoVarHandleInvokeTranslation(self,
                                         shadow_frame,
-                                        /*invokeExact*/ false,
+                                        /*invokeExact=*/ false,
                                         method_handle,
                                         callsite_type,
                                         operands,
@@ -1155,7 +1156,7 @@
   } else if (IsInvokeVarHandle(handle_kind)) {
     return DoVarHandleInvokeTranslation(self,
                                         shadow_frame,
-                                        /*invokeExact*/ true,
+                                        /*invokeExact=*/ true,
                                         method_handle,
                                         callsite_type,
                                         operands,
diff --git a/runtime/method_handles_test.cc b/runtime/method_handles_test.cc
index d123754..6a7eb8c 100644
--- a/runtime/method_handles_test.cc
+++ b/runtime/method_handles_test.cc
@@ -22,6 +22,7 @@
 #include "handle_scope-inl.h"
 #include "jvalue-inl.h"
 #include "mirror/method_type.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "reflection.h"
 #include "scoped_thread_state_change-inl.h"
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index d489f14..9660bf0 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,7 +36,8 @@
   }
 
  private:
-  uint8_t flag_;
+  // This field is only accessed indirectly, via the FlagOffset() method.
+  uint8_t flag_ ATTRIBUTE_UNUSED;
   // Padding required for correct alignment of subclasses like Executable, Field, etc.
   uint8_t padding_[1] ATTRIBUTE_UNUSED;
 
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
new file mode 100644
index 0000000..4250ff8
--- /dev/null
+++ b/runtime/mirror/array-alloc-inl.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_ARRAY_ALLOC_INL_H_
+#define ART_RUNTIME_MIRROR_ARRAY_ALLOC_INL_H_
+
+#include "array-inl.h"
+
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+
+#include "base/bit_utils.h"
+#include "base/casts.h"
+#include "class.h"
+#include "gc/heap-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+
+static inline size_t ComputeArraySize(int32_t component_count, size_t component_size_shift) {
+  DCHECK_GE(component_count, 0);
+
+  size_t component_size = 1U << component_size_shift;
+  size_t header_size = Array::DataOffset(component_size).SizeValue();
+  size_t data_size = static_cast<size_t>(component_count) << component_size_shift;
+  size_t size = header_size + data_size;
+
+  // Check for size_t overflow if this was an unreasonable request
+  // but let the caller throw OutOfMemoryError.
+#ifdef __LP64__
+  // 64-bit. No overflow as component_count is 32-bit and the maximum
+  // component size is 8.
+  DCHECK_LE((1U << component_size_shift), 8U);
+#else
+  // 32-bit.
+  DCHECK_NE(header_size, 0U);
+  DCHECK_EQ(RoundUp(header_size, component_size), header_size);
+  // The array length limit (exclusive).
+  const size_t length_limit = (0U - header_size) >> component_size_shift;
+  if (UNLIKELY(length_limit <= static_cast<size_t>(component_count))) {
+    return 0;  // failure
+  }
+#endif
+  return size;
+}
+
+// Used for setting the array length in the allocation code path to ensure it is guarded by a
+// StoreStore fence.
+class SetLengthVisitor {
+ public:
+  explicit SetLengthVisitor(int32_t length) : length_(length) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
+    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
+    // DCHECK(array->IsArrayInstance());
+    array->SetLength(length_);
+  }
+
+ private:
+  const int32_t length_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetLengthVisitor);
+};
+
+// Similar to SetLengthVisitor, used for setting the array length to fill the usable size of an
+// array.
+class SetLengthToUsableSizeVisitor {
+ public:
+  SetLengthToUsableSizeVisitor(int32_t min_length, size_t header_size,
+                               size_t component_size_shift) :
+      minimum_length_(min_length), header_size_(header_size),
+      component_size_shift_(component_size_shift) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
+    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
+    // DCHECK(array->IsArrayInstance());
+    int32_t length = (usable_size - header_size_) >> component_size_shift_;
+    DCHECK_GE(length, minimum_length_);
+    uint8_t* old_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+                                                                    minimum_length_));
+    uint8_t* new_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+                                                                    length));
+    // Ensure space beyond original allocation is zeroed.
+    memset(old_end, 0, new_end - old_end);
+    array->SetLength(length);
+  }
+
+ private:
+  const int32_t minimum_length_;
+  const size_t header_size_;
+  const size_t component_size_shift_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
+};
+
+template <bool kIsInstrumented, bool kFillUsable>
+inline ObjPtr<Array> Array::Alloc(Thread* self,
+                                  ObjPtr<Class> array_class,
+                                  int32_t component_count,
+                                  size_t component_size_shift,
+                                  gc::AllocatorType allocator_type) {
+  DCHECK(allocator_type != gc::kAllocatorTypeLOS);
+  DCHECK(array_class != nullptr);
+  DCHECK(array_class->IsArrayClass());
+  DCHECK_EQ(array_class->GetComponentSizeShift(), component_size_shift);
+  DCHECK_EQ(array_class->GetComponentSize(), (1U << component_size_shift));
+  size_t size = ComputeArraySize(component_count, component_size_shift);
+#ifdef __LP64__
+  // 64-bit. No size_t overflow.
+  DCHECK_NE(size, 0U);
+#else
+  // 32-bit.
+  if (UNLIKELY(size == 0)) {
+    self->ThrowOutOfMemoryError(android::base::StringPrintf("%s of length %d would overflow",
+                                                            array_class->PrettyDescriptor().c_str(),
+                                                            component_count).c_str());
+    return nullptr;
+  }
+#endif
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  ObjPtr<Array> result;
+  if (!kFillUsable) {
+    SetLengthVisitor visitor(component_count);
+    result = ObjPtr<Array>::DownCast(MakeObjPtr(
+        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
+                                                              allocator_type, visitor)));
+  } else {
+    SetLengthToUsableSizeVisitor visitor(component_count,
+                                         DataOffset(1U << component_size_shift).SizeValue(),
+                                         component_size_shift);
+    result = ObjPtr<Array>::DownCast(MakeObjPtr(
+        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
+                                                              allocator_type, visitor)));
+  }
+  if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
+    array_class = result->GetClass();  // In case the array class moved.
+    CHECK_EQ(array_class->GetComponentSize(), 1U << component_size_shift);
+    if (!kFillUsable) {
+      CHECK_EQ(result->SizeOf(), size);
+    } else {
+      CHECK_GE(result->SizeOf(), size);
+    }
+  }
+  return result;
+}
+
+template<typename T>
+inline ObjPtr<PrimitiveArray<T>> PrimitiveArray<T>::AllocateAndFill(Thread* self,
+                                                                   const T* data,
+                                                                   size_t length) {
+  StackHandleScope<1> hs(self);
+  Handle<PrimitiveArray<T>> arr(hs.NewHandle(PrimitiveArray<T>::Alloc(self, length)));
+  if (!arr.IsNull()) {
+    // Copy the caller's data in; the copy is skipped entirely if allocation failed.
+    memcpy(arr->GetData(), data, sizeof(T) * length);
+  }
+  return arr.Get();
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_ARRAY_ALLOC_INL_H_
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 2e39530..a6a5ba2 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -20,13 +20,12 @@
 #include "array.h"
 
 #include <android-base/logging.h>
-#include <android-base/stringprintf.h>
 
 #include "base/bit_utils.h"
 #include "base/casts.h"
 #include "class.h"
-#include "gc/heap-inl.h"
 #include "obj_ptr-inl.h"
+#include "runtime.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -50,14 +49,6 @@
   return header_size + data_size;
 }
 
-inline MemberOffset Array::DataOffset(size_t component_size) {
-  DCHECK(IsPowerOfTwo(component_size)) << component_size;
-  size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
-  DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
-      << "Array data offset isn't aligned with component size";
-  return MemberOffset(data_offset);
-}
-
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Array::CheckIsValidIndex(int32_t index) {
   if (UNLIKELY(static_cast<uint32_t>(index) >=
@@ -68,152 +59,6 @@
   return true;
 }
 
-static inline size_t ComputeArraySize(int32_t component_count, size_t component_size_shift) {
-  DCHECK_GE(component_count, 0);
-
-  size_t component_size = 1U << component_size_shift;
-  size_t header_size = Array::DataOffset(component_size).SizeValue();
-  size_t data_size = static_cast<size_t>(component_count) << component_size_shift;
-  size_t size = header_size + data_size;
-
-  // Check for size_t overflow if this was an unreasonable request
-  // but let the caller throw OutOfMemoryError.
-#ifdef __LP64__
-  // 64-bit. No overflow as component_count is 32-bit and the maximum
-  // component size is 8.
-  DCHECK_LE((1U << component_size_shift), 8U);
-#else
-  // 32-bit.
-  DCHECK_NE(header_size, 0U);
-  DCHECK_EQ(RoundUp(header_size, component_size), header_size);
-  // The array length limit (exclusive).
-  const size_t length_limit = (0U - header_size) >> component_size_shift;
-  if (UNLIKELY(length_limit <= static_cast<size_t>(component_count))) {
-    return 0;  // failure
-  }
-#endif
-  return size;
-}
-
-// Used for setting the array length in the allocation code path to ensure it is guarded by a
-// StoreStore fence.
-class SetLengthVisitor {
- public:
-  explicit SetLengthVisitor(int32_t length) : length_(length) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
-    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
-    // DCHECK(array->IsArrayInstance());
-    array->SetLength(length_);
-  }
-
- private:
-  const int32_t length_;
-
-  DISALLOW_COPY_AND_ASSIGN(SetLengthVisitor);
-};
-
-// Similar to SetLengthVisitor, used for setting the array length to fill the usable size of an
-// array.
-class SetLengthToUsableSizeVisitor {
- public:
-  SetLengthToUsableSizeVisitor(int32_t min_length, size_t header_size,
-                               size_t component_size_shift) :
-      minimum_length_(min_length), header_size_(header_size),
-      component_size_shift_(component_size_shift) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
-    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
-    // DCHECK(array->IsArrayInstance());
-    int32_t length = (usable_size - header_size_) >> component_size_shift_;
-    DCHECK_GE(length, minimum_length_);
-    uint8_t* old_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
-                                                                    minimum_length_));
-    uint8_t* new_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
-                                                                    length));
-    // Ensure space beyond original allocation is zeroed.
-    memset(old_end, 0, new_end - old_end);
-    array->SetLength(length);
-  }
-
- private:
-  const int32_t minimum_length_;
-  const size_t header_size_;
-  const size_t component_size_shift_;
-
-  DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
-};
-
-template <bool kIsInstrumented, bool kFillUsable>
-inline ObjPtr<Array> Array::Alloc(Thread* self,
-                                  ObjPtr<Class> array_class,
-                                  int32_t component_count,
-                                  size_t component_size_shift,
-                                  gc::AllocatorType allocator_type) {
-  DCHECK(allocator_type != gc::kAllocatorTypeLOS);
-  DCHECK(array_class != nullptr);
-  DCHECK(array_class->IsArrayClass());
-  DCHECK_EQ(array_class->GetComponentSizeShift(), component_size_shift);
-  DCHECK_EQ(array_class->GetComponentSize(), (1U << component_size_shift));
-  size_t size = ComputeArraySize(component_count, component_size_shift);
-#ifdef __LP64__
-  // 64-bit. No size_t overflow.
-  DCHECK_NE(size, 0U);
-#else
-  // 32-bit.
-  if (UNLIKELY(size == 0)) {
-    self->ThrowOutOfMemoryError(android::base::StringPrintf("%s of length %d would overflow",
-                                                            array_class->PrettyDescriptor().c_str(),
-                                                            component_count).c_str());
-    return nullptr;
-  }
-#endif
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  ObjPtr<Array> result;
-  if (!kFillUsable) {
-    SetLengthVisitor visitor(component_count);
-    result = ObjPtr<Array>::DownCast(MakeObjPtr(
-        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
-                                                              allocator_type, visitor)));
-  } else {
-    SetLengthToUsableSizeVisitor visitor(component_count,
-                                         DataOffset(1U << component_size_shift).SizeValue(),
-                                         component_size_shift);
-    result = ObjPtr<Array>::DownCast(MakeObjPtr(
-        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
-                                                              allocator_type, visitor)));
-  }
-  if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
-    array_class = result->GetClass();  // In case the array class moved.
-    CHECK_EQ(array_class->GetComponentSize(), 1U << component_size_shift);
-    if (!kFillUsable) {
-      CHECK_EQ(result->SizeOf(), size);
-    } else {
-      CHECK_GE(result->SizeOf(), size);
-    }
-  }
-  return result;
-}
-
-template<typename T>
-inline ObjPtr<PrimitiveArray<T>> PrimitiveArray<T>::AllocateAndFill(Thread* self,
-                                                                   const T* data,
-                                                                   size_t length) {
-  StackHandleScope<1> hs(self);
-  Handle<PrimitiveArray<T>> arr(hs.NewHandle(PrimitiveArray<T>::Alloc(self, length)));
-  if (!arr.IsNull()) {
-    // Copy it in. Just skip if it's null
-    memcpy(arr->GetData(), data, sizeof(T) * length);
-  }
-  return arr.Get();
-}
-
 template<typename T>
 inline T PrimitiveArray<T>::Get(int32_t i) {
   if (!CheckIsValidIndex(i)) {
@@ -379,16 +224,14 @@
   }
 }
 
-template<typename T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<typename T, VerifyObjectFlags kVerifyFlags>
 inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
   // C style casts here since we sometimes have T be a pointer, or sometimes an integer
   // (for stack traces).
   if (ptr_size == PointerSize::k64) {
-    return (T)static_cast<uintptr_t>(
-        AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
+    return (T)static_cast<uintptr_t>(AsLongArray<kVerifyFlags>()->GetWithoutChecks(idx));
   }
-  return (T)static_cast<uintptr_t>(static_cast<uint32_t>(
-      AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx)));
+  return (T)static_cast<uintptr_t>(AsIntArray<kVerifyFlags>()->GetWithoutChecks(idx));
 }
 
 template<bool kTransactionActive, bool kUnchecked>
@@ -410,12 +253,12 @@
                                                     ptr_size);
 }
 
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
+template <VerifyObjectFlags kVerifyFlags, typename Visitor>
 inline void PointerArray::Fixup(mirror::PointerArray* dest,
                                 PointerSize pointer_size,
                                 const Visitor& visitor) {
   for (size_t i = 0, count = GetLength(); i < count; ++i) {
-    void* ptr = GetElementPtrSize<void*, kVerifyFlags, kReadBarrierOption>(i, pointer_size);
+    void* ptr = GetElementPtrSize<void*, kVerifyFlags>(i, pointer_size);
     void* new_ptr = visitor(ptr);
     if (ptr != new_ptr) {
       dest->SetElementPtrSize<false, true>(i, new_ptr, pointer_size);
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 66ec368..05e397d 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -16,6 +16,7 @@
 
 #include "array-inl.h"
 
+#include "array-alloc-inl.h"
 #include "base/utils.h"
 #include "class-inl.h"
 #include "class.h"
@@ -26,6 +27,7 @@
 #include "gc/accounting/card_table-inl.h"
 #include "handle_scope-inl.h"
 #include "object-inl.h"
+#include "object_array-alloc-inl.h"
 #include "object_array-inl.h"
 #include "thread.h"
 
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 8bdd561..8816c61 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_ARRAY_H_
 #define ART_RUNTIME_MIRROR_ARRAY_H_
 
+#include "base/bit_utils.h"
 #include "base/enums.h"
 #include "gc/allocator_type.h"
 #include "obj_ptr.h"
@@ -25,6 +26,7 @@
 namespace art {
 
 template<class T> class Handle;
+class Thread;
 
 namespace mirror {
 
@@ -66,11 +68,17 @@
     SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
   }
 
-  static MemberOffset LengthOffset() {
+  static constexpr MemberOffset LengthOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Array, length_);
   }
 
-  static MemberOffset DataOffset(size_t component_size);
+  static constexpr MemberOffset DataOffset(size_t component_size) {
+    DCHECK(IsPowerOfTwo(component_size)) << component_size;
+    size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
+    DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
+        << "Array data offset isn't aligned with component size";
+    return MemberOffset(data_offset);
+  }
 
   void* GetRawData(size_t component_size, int32_t index)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -102,9 +110,11 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The number of array elements.
-  int32_t length_;
+  // This field is only accessed indirectly, via the LengthOffset() method.
+  int32_t length_ ATTRIBUTE_UNUSED;
   // Marker for the data (used by generated code)
-  uint32_t first_element_[0];
+  // This field is only accessed indirectly, via the DataOffset() method.
+  uint32_t first_element_[0] ATTRIBUTE_UNUSED;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
 };
@@ -183,14 +193,13 @@
 // Either an IntArray or a LongArray.
 class PointerArray : public Array {
  public:
-  template<typename T,
-           VerifyObjectFlags kVerifyFlags = kVerifyNone,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<typename T, VerifyObjectFlags kVerifyFlags = kVerifyNone>
   T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template<VerifyObjectFlags kVerifyFlags = kVerifyNone>
   void** ElementAddress(size_t index, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK_LT(index, static_cast<size_t>(GetLength()));
+    DCHECK_LT(index, static_cast<size_t>(GetLength<kVerifyFlags>()));
     return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(this) +
                                     Array::DataOffset(static_cast<size_t>(ptr_size)).Uint32Value() +
                                     static_cast<size_t>(ptr_size) * index);
@@ -205,9 +214,7 @@
 
   // Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
   // to dest if visitor(source_ptr) != source_ptr.
-  template <VerifyObjectFlags kVerifyFlags = kVerifyNone,
-            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
-            typename Visitor>
+  template <VerifyObjectFlags kVerifyFlags = kVerifyNone, typename Visitor>
   void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/call_site.cc b/runtime/mirror/call_site.cc
index 738106c..7a23940 100644
--- a/runtime/mirror/call_site.cc
+++ b/runtime/mirror/call_site.cc
@@ -16,7 +16,7 @@
 
 #include "call_site.h"
 
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 #include "obj_ptr-inl.h"
 
diff --git a/runtime/mirror/call_site.h b/runtime/mirror/call_site.h
index 9b6afca..be5bdc9 100644
--- a/runtime/mirror/call_site.h
+++ b/runtime/mirror/call_site.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_MIRROR_CALL_SITE_H_
 #define ART_RUNTIME_MIRROR_CALL_SITE_H_
 
-#include "base/utils.h"
 #include "mirror/method_handle_impl.h"
 
 namespace art {
diff --git a/runtime/mirror/class-alloc-inl.h b/runtime/mirror/class-alloc-inl.h
new file mode 100644
index 0000000..d4a532e
--- /dev/null
+++ b/runtime/mirror/class-alloc-inl.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_ALLOC_INL_H_
+#define ART_RUNTIME_MIRROR_CLASS_ALLOC_INL_H_
+
+#include "class-inl.h"
+
+#include "gc/heap-inl.h"
+#include "object-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+
+inline void Class::CheckObjectAlloc() {
+  DCHECK(!IsArrayClass())
+      << PrettyClass()
+      << "A array shouldn't be allocated through this "
+      << "as it requires a pre-fence visitor that sets the class size.";
+  DCHECK(!IsClassClass())
+      << PrettyClass()
+      << "A class object shouldn't be allocated through this "
+      << "as it requires a pre-fence visitor that sets the class size.";
+  DCHECK(!IsStringClass())
+      << PrettyClass()
+      << "A string shouldn't be allocated through this "
+      << "as it requires a pre-fence visitor that sets the class size.";
+  DCHECK(IsInstantiable()) << PrettyClass();
+  // TODO: decide whether we want this check. It currently fails during bootstrap.
+  // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass();
+  DCHECK_GE(this->object_size_, sizeof(Object));
+}
+
+template<bool kIsInstrumented, bool kCheckAddFinalizer>
+inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
+  CheckObjectAlloc();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
+  if (!kCheckAddFinalizer) {
+    DCHECK(!IsFinalizable());
+  }
+  // Note that the this pointer may be invalidated after the allocation.
+  ObjPtr<Object> obj =
+      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
+                                                             this,
+                                                             this->object_size_,
+                                                             allocator_type,
+                                                             VoidFunctor());
+  if (add_finalizer && LIKELY(obj != nullptr)) {
+    heap->AddFinalizerReference(self, &obj);
+    if (UNLIKELY(self->IsExceptionPending())) {
+      // Failed to allocate finalizer reference, it means that the whole allocation failed.
+      obj = nullptr;
+    }
+  }
+  return obj;
+}
+
+inline ObjPtr<Object> Class::AllocObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+inline ObjPtr<Object> Class::AllocNonMovableObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_CLASS_ALLOC_INL_H_
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 51dc1a4..fe49813 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -22,6 +22,7 @@
 #include "art_field.h"
 #include "art_method.h"
 #include "base/array_slice.h"
+#include "base/iteration_range.h"
 #include "base/length_prefixed_array.h"
 #include "base/utils.h"
 #include "class_linker.h"
@@ -30,41 +31,42 @@
 #include "dex/dex_file-inl.h"
 #include "dex/invoke_type.h"
 #include "dex_cache.h"
-#include "gc/heap-inl.h"
 #include "iftable.h"
+#include "imtable.h"
 #include "object-inl.h"
 #include "object_array.h"
 #include "read_barrier-inl.h"
 #include "runtime.h"
 #include "string.h"
 #include "subtype_check.h"
+#include "thread-current-inl.h"
 
 namespace art {
 namespace mirror {
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline uint32_t Class::GetObjectSize() {
   // Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
-  DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf();
+  DCHECK((!IsVariableSize<kVerifyFlags>())) << "class=" << PrettyTypeOf();
   return GetField32(ObjectSizeOffset());
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline uint32_t Class::GetObjectSizeAllocFastPath() {
   // Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
-  DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf();
+  DCHECK((!IsVariableSize<kVerifyFlags>())) << "class=" << PrettyTypeOf();
   return GetField32(ObjectSizeAllocFastPathOffset());
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline Class* Class::GetSuperClass() {
+inline ObjPtr<Class> Class::GetSuperClass() {
   // Can only get super class for loaded classes (hack for when runtime is
   // initializing)
   DCHECK(IsLoaded<kVerifyFlags>() ||
          IsErroneous<kVerifyFlags>() ||
          !Runtime::Current()->IsStarted()) << IsLoaded();
-  return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
-      OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+  return ObjPtr<Class>(GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(Class, super_class_)));
 }
 
 inline void Class::SetSuperClass(ObjPtr<Class> new_super_class) {
@@ -251,18 +253,14 @@
                                           uint32_t num_direct,
                                           uint32_t num_virtual) {
   DCHECK_LE(num_direct + num_virtual, (new_methods == nullptr) ? 0 : new_methods->size());
-  SetMethodsPtrInternal(new_methods);
+  SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, methods_),
+                    static_cast<uint64_t>(reinterpret_cast<uintptr_t>(new_methods)));
   SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, copied_methods_offset_),
                     dchecked_integral_cast<uint16_t>(num_direct + num_virtual));
   SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_offset_),
                        dchecked_integral_cast<uint16_t>(num_direct));
 }
 
-inline void Class::SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods) {
-  SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, methods_),
-                    static_cast<uint64_t>(reinterpret_cast<uintptr_t>(new_methods)));
-}
-
 template<VerifyObjectFlags kVerifyFlags>
 inline ArtMethod* Class::GetVirtualMethod(size_t i, PointerSize pointer_size) {
   CheckPointerSize(pointer_size);
@@ -299,31 +297,35 @@
 }
 
 inline bool Class::HasVTable() {
-  return GetVTable() != nullptr || ShouldHaveEmbeddedVTable();
+  // No read barrier is needed for comparing with null.
+  return GetVTable<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr ||
+         ShouldHaveEmbeddedVTable();
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline int32_t Class::GetVTableLength() {
-  if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+  if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
     return GetEmbeddedVTableLength();
   }
-  return GetVTable<kVerifyFlags, kReadBarrierOption>() != nullptr ?
-      GetVTable<kVerifyFlags, kReadBarrierOption>()->GetLength() : 0;
+  // We do not need a read barrier here as the length is constant,
+  // both from-space and to-space vtables shall yield the same result.
+  ObjPtr<PointerArray> vtable = GetVTable<kVerifyFlags, kWithoutReadBarrier>();
+  return vtable != nullptr ? vtable->GetLength() : 0;
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) {
-  if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+  if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
     return GetEmbeddedVTableEntry(i, pointer_size);
   }
-  auto* vtable = GetVTable<kVerifyFlags, kReadBarrierOption>();
+  ObjPtr<PointerArray> vtable = GetVTable<kVerifyFlags, kReadBarrierOption>();
   DCHECK(vtable != nullptr);
-  return vtable->template GetElementPtrSize<ArtMethod*, kVerifyFlags, kReadBarrierOption>(
-      i, pointer_size);
+  return vtable->GetElementPtrSize<ArtMethod*, kVerifyFlags>(i, pointer_size);
 }
 
+template<VerifyObjectFlags kVerifyFlags>
 inline int32_t Class::GetEmbeddedVTableLength() {
-  return GetField32(MemberOffset(EmbeddedVTableLengthOffset()));
+  return GetField32<kVerifyFlags>(MemberOffset(EmbeddedVTableLengthOffset()));
 }
 
 inline void Class::SetEmbeddedVTableLength(int32_t len) {
@@ -374,13 +376,13 @@
   return false;
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Class::IsVariableSize() {
   // Classes, arrays, and strings vary in size, and so the object_size_ field cannot
   // be used to Get their instance size
-  return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
-         IsArrayClass<kVerifyFlags, kReadBarrierOption>() ||
-         IsStringClass();
+  return IsClassClass<kVerifyFlags>() ||
+         IsArrayClass<kVerifyFlags>() ||
+         IsStringClass<kVerifyFlags>();
 }
 
 inline void Class::SetObjectSize(uint32_t new_object_size) {
@@ -628,9 +630,11 @@
   return ret.Ptr();
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline int32_t Class::GetIfTableCount() {
-  return GetIfTable<kVerifyFlags, kReadBarrierOption>()->Count();
+  // We do not need a read barrier here as the length is constant,
+  // both from-space and to-space iftables shall yield the same result.
+  return GetIfTable<kVerifyFlags, kWithoutReadBarrier>()->Count();
 }
 
 inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
@@ -647,19 +651,18 @@
 inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
   ObjPtr<Class> super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
   return (super_class != nullptr)
-      ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
-                             kHeapReferenceSize))
+      ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags>(), kHeapReferenceSize))
       : ClassOffset();
 }
 
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template <VerifyObjectFlags kVerifyFlags>
 inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
-  DCHECK(IsResolved());
+  DCHECK(IsResolved<kVerifyFlags>());
   uint32_t base = sizeof(Class);  // Static fields come after the class.
-  if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+  if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
     // Static fields come after the embedded tables.
     base = Class::ComputeClassSize(
-        true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
+        true, GetEmbeddedVTableLength<kVerifyFlags>(), 0, 0, 0, 0, 0, pointer_size);
   }
   return MemberOffset(base);
 }
@@ -756,58 +759,6 @@
   return size_shift;
 }
 
-inline void Class::CheckObjectAlloc() {
-  DCHECK(!IsArrayClass())
-      << PrettyClass()
-      << "A array shouldn't be allocated through this "
-      << "as it requires a pre-fence visitor that sets the class size.";
-  DCHECK(!IsClassClass())
-      << PrettyClass()
-      << "A class object shouldn't be allocated through this "
-      << "as it requires a pre-fence visitor that sets the class size.";
-  DCHECK(!IsStringClass())
-      << PrettyClass()
-      << "A string shouldn't be allocated through this "
-      << "as it requires a pre-fence visitor that sets the class size.";
-  DCHECK(IsInstantiable()) << PrettyClass();
-  // TODO: decide whether we want this check. It currently fails during bootstrap.
-  // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass();
-  DCHECK_GE(this->object_size_, sizeof(Object));
-}
-
-template<bool kIsInstrumented, bool kCheckAddFinalizer>
-inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
-  CheckObjectAlloc();
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
-  if (!kCheckAddFinalizer) {
-    DCHECK(!IsFinalizable());
-  }
-  // Note that the this pointer may be invalidated after the allocation.
-  ObjPtr<Object> obj =
-      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
-                                                             this,
-                                                             this->object_size_,
-                                                             allocator_type,
-                                                             VoidFunctor());
-  if (add_finalizer && LIKELY(obj != nullptr)) {
-    heap->AddFinalizerReference(self, &obj);
-    if (UNLIKELY(self->IsExceptionPending())) {
-      // Failed to allocate finalizer reference, it means that the whole allocation failed.
-      obj = nullptr;
-    }
-  }
-  return obj;
-}
-
-inline ObjPtr<Object> Class::AllocObject(Thread* self) {
-  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
-}
-
-inline ObjPtr<Object> Class::AllocNonMovableObject(Thread* self) {
-  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
-}
-
 inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
                                         uint32_t num_vtable_entries,
                                         uint32_t num_8bit_static_fields,
@@ -853,10 +804,11 @@
   return size;
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Class::IsClassClass() {
-  ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
-      template GetClass<kVerifyFlags, kReadBarrierOption>();
+  // OK to look at from-space copies since java.lang.Class.class is not movable.
+  // See b/114413743
+  ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kWithoutReadBarrier>();
   return this == java_lang_Class;
 }
 
@@ -876,7 +828,7 @@
     return ProxyDescriptorEquals(match);
   } else {
     const DexFile& dex_file = GetDexFile();
-    const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
+    const dex::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
     return strcmp(dex_file.GetTypeDescriptor(type_id), match) == 0;
   }
 }
@@ -922,6 +874,9 @@
 }
 
 inline void Class::SetAccessFlags(uint32_t new_access_flags) {
+  if (kIsDebugBuild) {
+    SetAccessFlagsDCheck(new_access_flags);
+  }
   // Called inside a transaction when setting pre-verified flag during boot image compilation.
   if (Runtime::Current()->IsActiveTransaction()) {
     SetField32<true>(AccessFlagsOffset(), new_access_flags);
@@ -947,7 +902,7 @@
     ObjectArray<Class>* interfaces = GetProxyInterfaces();
     return interfaces != nullptr ? interfaces->GetLength() : 0;
   } else {
-    const DexFile::TypeList* interfaces = GetInterfaceTypeList();
+    const dex::TypeList* interfaces = GetInterfaceTypeList();
     if (interfaces == nullptr) {
       return 0;
     } else {
@@ -1003,7 +958,6 @@
 }
 
 inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
-  CheckPointerSize(pointer_size);
   return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
 }
 
@@ -1016,15 +970,28 @@
   return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Class::IsArrayClass() {
-  return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
+  // We do not need a read barrier for comparing with null.
+  return GetComponentType<kVerifyFlags, kWithoutReadBarrier>() != nullptr;
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Class::IsObjectArrayClass() {
-  ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
-  return component_type != nullptr && !component_type->IsPrimitive();
+  // We do not need a read barrier here as the primitive type is constant,
+  // both from-space and to-space component type classes shall yield the same result.
+  ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
+  constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+  return component_type != nullptr && !component_type->IsPrimitive<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+bool Class::IsPrimitiveArray() {
+  // We do not need a read barrier here as the primitive type is constant,
+  // both from-space and to-space component type classes shall yield the same result.
+  ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
+  constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+  return component_type != nullptr && component_type->IsPrimitive<kNewFlags>();
 }
 
 inline bool Class::IsAssignableFrom(ObjPtr<Class> src) {
@@ -1066,49 +1033,42 @@
   return arr != nullptr ? arr->size() : 0u;
 }
 
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
+template <typename T, VerifyObjectFlags kVerifyFlags, typename Visitor>
+inline void Class::FixupNativePointer(
+    Class* dest, PointerSize pointer_size, const Visitor& visitor, MemberOffset member_offset) {
+  void** address =
+      reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) + member_offset.Uint32Value());
+  T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size);
+  T new_value = visitor(old_value, address);
+  if (old_value != new_value) {
+    dest->SetFieldPtrWithSize</* kTransactionActive= */ false,
+                              /* kCheckTransaction= */ true,
+                              kVerifyNone>(member_offset, new_value, pointer_size);
+  }
+}
+
+template <VerifyObjectFlags kVerifyFlags, typename Visitor>
 inline void Class::FixupNativePointers(Class* dest,
                                        PointerSize pointer_size,
                                        const Visitor& visitor) {
-  auto dest_address_fn = [dest](MemberOffset offset) {
-    return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) + offset.Uint32Value());
-  };
   // Update the field arrays.
-  LengthPrefixedArray<ArtField>* const sfields = GetSFieldsPtr();
-  void** sfields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
-  LengthPrefixedArray<ArtField>* const new_sfields = visitor(sfields, sfields_dest_address);
-  if (sfields != new_sfields) {
-    dest->SetSFieldsPtrUnchecked(new_sfields);
-  }
-  LengthPrefixedArray<ArtField>* const ifields = GetIFieldsPtr();
-  void** ifields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
-  LengthPrefixedArray<ArtField>* const new_ifields = visitor(ifields, ifields_dest_address);
-  if (ifields != new_ifields) {
-    dest->SetIFieldsPtrUnchecked(new_ifields);
-  }
+  FixupNativePointer<LengthPrefixedArray<ArtField>*, kVerifyFlags>(
+      dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
+  FixupNativePointer<LengthPrefixedArray<ArtField>*, kVerifyFlags>(
+      dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
   // Update method array.
-  LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
-  void** methods_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, methods_));
-  LengthPrefixedArray<ArtMethod>* new_methods = visitor(methods, methods_dest_address);
-  if (methods != new_methods) {
-    dest->SetMethodsPtrInternal(new_methods);
-  }
+  FixupNativePointer<LengthPrefixedArray<ArtMethod>*, kVerifyFlags>(
+      dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, methods_));
   // Fix up embedded tables.
-  if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
-    for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
-      ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
-      void** method_dest_addr = dest_address_fn(EmbeddedVTableEntryOffset(i, pointer_size));
-      ArtMethod* new_method = visitor(method, method_dest_addr);
-      if (method != new_method) {
-        dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
-      }
+  if (!IsTemp<kVerifyNone>() && ShouldHaveEmbeddedVTable<kVerifyNone>()) {
+    for (int32_t i = 0, count = GetEmbeddedVTableLength<kVerifyFlags>(); i < count; ++i) {
+      FixupNativePointer<ArtMethod*, kVerifyFlags>(
+          dest, pointer_size, visitor, EmbeddedVTableEntryOffset(i, pointer_size));
     }
   }
-  if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
-    ImTable* imt = GetImt(pointer_size);
-    void** imt_dest_addr = dest_address_fn(ImtPtrOffset(pointer_size));
-    ImTable* new_imt = visitor(imt, imt_dest_addr);
-    dest->SetImt(new_imt, pointer_size);
+  if (!IsTemp<kVerifyNone>() && ShouldHaveImt<kVerifyNone>()) {
+    FixupNativePointer<ImTable*, kVerifyFlags>(
+        dest, pointer_size, visitor, ImtPtrOffset(pointer_size));
   }
 }
 
@@ -1148,6 +1108,28 @@
   return component->IsPrimitive() || component->CannotBeAssignedFromOtherTypes();
 }
 
+template <bool kCheckTransaction>
+inline void Class::SetClassLoader(ObjPtr<ClassLoader> new_class_loader) {
+  if (kCheckTransaction && Runtime::Current()->IsActiveTransaction()) {
+    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
+  } else {
+    DCHECK(!Runtime::Current()->IsActiveTransaction());
+    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
+  }
+}
+
+inline void Class::SetRecursivelyInitialized() {
+  DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+  uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+  SetAccessFlags(flags | kAccRecursivelyInitialized);
+}
+
+inline void Class::SetHasDefaultMethods() {
+  DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+  uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+  SetAccessFlags(flags | kAccHasDefaultMethod);
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 26dba02..fcd3714 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -30,9 +30,12 @@
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_annotations.h"
+#include "dex/signature-inl.h"
 #include "dex_cache.h"
 #include "gc/accounting/card_table-inl.h"
+#include "gc/heap-inl.h"
 #include "handle_scope-inl.h"
+#include "hidden_api.h"
 #include "subtype_check.h"
 #include "method.h"
 #include "object-inl.h"
@@ -83,7 +86,7 @@
     Thread* self = Thread::Current();
     if (name == nullptr) {
       // Note: ThrowNullPointerException() requires a message which we deliberately want to omit.
-      self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg */ nullptr);
+      self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg= */ nullptr);
     } else {
       self->ThrowNewException("Ljava/lang/ClassNotFoundException;", name->ToModifiedUtf8().c_str());
     }
@@ -205,6 +208,10 @@
     }
   }
 
+  if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) {
+    CHECK(h_this->WasVerificationAttempted()) << h_this->PrettyClassAndClassLoader();
+  }
+
   if (!class_linker_initialized) {
     // When the class linker is being initialized its single threaded and by definition there can be
     // no waiters. During initialization classes may appear temporary but won't be retired as their
@@ -426,14 +433,6 @@
   return GetClassRoot<mirror::Throwable>()->IsAssignableFrom(this);
 }
 
-void Class::SetClassLoader(ObjPtr<ClassLoader> new_class_loader) {
-  if (Runtime::Current()->IsActiveTransaction()) {
-    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
-  } else {
-    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
-  }
-}
-
 template <typename SignatureType>
 static inline ArtMethod* FindInterfaceMethodWithSignature(ObjPtr<Class> klass,
                                                           const StringPiece& name,
@@ -496,7 +495,7 @@
                                       PointerSize pointer_size) {
   // We always search by name and signature, ignoring the type index in the MethodId.
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+  const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   StringPiece name = dex_file.StringDataByIdx(method_id.name_idx_);
   const Signature signature = dex_file.GetMethodSignature(method_id);
   return FindInterfaceMethod(name, signature, pointer_size);
@@ -623,15 +622,20 @@
   }
   // If not found, we need to search by name and signature.
   const DexFile& dex_file = *dex_cache->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+  const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   const Signature signature = dex_file.GetMethodSignature(method_id);
   StringPiece name;  // Delay strlen() until actually needed.
   // If we do not have a dex_cache match, try to find the declared method in this class now.
   if (this_dex_cache != dex_cache && !GetDeclaredMethodsSlice(pointer_size).empty()) {
     DCHECK(name.empty());
-    name = dex_file.StringDataByIdx(method_id.name_idx_);
+    // Avoid string comparisons by comparing the respective unicode lengths first.
+    uint32_t length, other_length;  // UTF16 length.
+    name = dex_file.GetMethodName(method_id, &length);
     for (ArtMethod& method : GetDeclaredMethodsSlice(pointer_size)) {
-      if (method.GetName() == name && method.GetSignature() == signature) {
+      DCHECK_NE(method.GetDexMethodIndex(), dex::kDexNoIndex);
+      const char* other_name = method.GetDexFile()->GetMethodName(
+          method.GetDexMethodIndex(), &other_length);
+      if (length == other_length && name == other_name && signature == method.GetSignature()) {
         return &method;
       }
     }
@@ -649,7 +653,7 @@
       // Matching dex_cache. We cannot compare the `dex_method_idx` anymore because
       // the type index differs, so compare the name index and proto index.
       for (ArtMethod& method : declared_methods) {
-        const DexFile::MethodId& cmp_method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+        const dex::MethodId& cmp_method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
         if (cmp_method_id.name_idx_ == method_id.name_idx_ &&
             cmp_method_id.proto_idx_ == method_id.proto_idx_) {
           candidate_method = &method;
@@ -1003,7 +1007,7 @@
     return storage->c_str();
   } else {
     const DexFile& dex_file = GetDexFile();
-    const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
+    const dex::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
     return dex_file.GetTypeDescriptor(type_id);
   }
 }
@@ -1016,7 +1020,7 @@
   return storage->c_str();
 }
 
-const DexFile::ClassDef* Class::GetClassDef() {
+const dex::ClassDef* Class::GetClassDef() {
   uint16_t class_def_idx = GetDexClassDefIndex();
   if (class_def_idx == DexFile::kDexNoIndex16) {
     return nullptr;
@@ -1084,7 +1088,7 @@
 
 const char* Class::GetSourceFile() {
   const DexFile& dex_file = GetDexFile();
-  const DexFile::ClassDef* dex_class_def = GetClassDef();
+  const dex::ClassDef* dex_class_def = GetClassDef();
   if (dex_class_def == nullptr) {
     // Generated classes have no class def.
     return nullptr;
@@ -1101,8 +1105,8 @@
   return "generated class";
 }
 
-const DexFile::TypeList* Class::GetInterfaceTypeList() {
-  const DexFile::ClassDef* class_def = GetClassDef();
+const dex::TypeList* Class::GetInterfaceTypeList() {
+  const dex::ClassDef* class_def = GetClassDef();
   if (class_def == nullptr) {
     return nullptr;
   }
@@ -1245,22 +1249,54 @@
 
 dex::TypeIndex Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
   std::string temp;
-  const DexFile::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp));
+  const dex::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp));
   return (type_id == nullptr) ? dex::TypeIndex() : dex_file.GetIndexForTypeId(*type_id);
 }
 
+ALWAYS_INLINE
+static bool IsMethodPreferredOver(ArtMethod* orig_method,
+                                  bool orig_method_hidden,
+                                  ArtMethod* new_method,
+                                  bool new_method_hidden) {
+  DCHECK(new_method != nullptr);
+
+  // Is this the first result?
+  if (orig_method == nullptr) {
+    return true;
+  }
+
+  // Original method is hidden, the new one is not?
+  if (orig_method_hidden && !new_method_hidden) {
+    return true;
+  }
+
+  // We iterate over virtual methods first and then over direct ones,
+  // so we can never be in situation where `orig_method` is direct and
+  // `new_method` is virtual.
+  DCHECK(!orig_method->IsDirect() || new_method->IsDirect());
+
+  // Original method is synthetic, the new one is not?
+  if (orig_method->IsSynthetic() && !new_method->IsSynthetic()) {
+    return true;
+  }
+
+  return false;
+}
+
 template <PointerSize kPointerSize, bool kTransactionActive>
 ObjPtr<Method> Class::GetDeclaredMethodInternal(
     Thread* self,
     ObjPtr<Class> klass,
     ObjPtr<String> name,
-    ObjPtr<ObjectArray<Class>> args) {
-  // Covariant return types permit the class to define multiple
-  // methods with the same name and parameter types. Prefer to
-  // return a non-synthetic method in such situations. We may
-  // still return a synthetic method to handle situations like
-  // escalated visibility. We never return miranda methods that
-  // were synthesized by the runtime.
+    ObjPtr<ObjectArray<Class>> args,
+    const std::function<hiddenapi::AccessContext()>& fn_get_access_context) {
+  // Covariant return types (or smali) permit the class to define
+  // multiple methods with the same name and parameter types.
+  // Prefer (in decreasing order of importance):
+  //  1) non-hidden method over hidden
+  //  2) virtual methods over direct
+  //  3) non-synthetic methods over synthetic
+  // We never return miranda methods that were synthesized by the runtime.
   StackHandleScope<3> hs(self);
   auto h_method_name = hs.NewHandle(name);
   if (UNLIKELY(h_method_name == nullptr)) {
@@ -1269,8 +1305,13 @@
   }
   auto h_args = hs.NewHandle(args);
   Handle<Class> h_klass = hs.NewHandle(klass);
+  constexpr hiddenapi::AccessMethod access_method = hiddenapi::AccessMethod::kNone;
   ArtMethod* result = nullptr;
+  bool result_hidden = false;
   for (auto& m : h_klass->GetDeclaredVirtualMethods(kPointerSize)) {
+    if (m.IsMiranda()) {
+      continue;
+    }
     auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
     // May cause thread suspension.
     ObjPtr<String> np_name = np_method->ResolveNameString();
@@ -1280,14 +1321,24 @@
       }
       continue;
     }
-    if (!m.IsMiranda()) {
-      if (!m.IsSynthetic()) {
-        return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
-      }
-      result = &m;  // Remember as potential result if it's not a miranda method.
+    bool m_hidden = hiddenapi::ShouldDenyAccessToMember(&m, fn_get_access_context, access_method);
+    if (!m_hidden && !m.IsSynthetic()) {
+      // Non-hidden, virtual, non-synthetic. Best possible result, exit early.
+      return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+    } else if (IsMethodPreferredOver(result, result_hidden, &m, m_hidden)) {
+      // Remember as potential result.
+      result = &m;
+      result_hidden = m_hidden;
     }
   }
-  if (result == nullptr) {
+
+  if ((result != nullptr) && !result_hidden) {
+    // We have not found a non-hidden, virtual, non-synthetic method, but
+    // if we have found a non-hidden, virtual, synthetic method, we cannot
+    // do better than that later.
+    DCHECK(!result->IsDirect());
+    DCHECK(result->IsSynthetic());
+  } else {
     for (auto& m : h_klass->GetDirectMethods(kPointerSize)) {
       auto modifiers = m.GetAccessFlags();
       if ((modifiers & kAccConstructor) != 0) {
@@ -1307,12 +1358,20 @@
         continue;
       }
       DCHECK(!m.IsMiranda());  // Direct methods cannot be miranda methods.
-      if ((modifiers & kAccSynthetic) == 0) {
+      bool m_hidden = hiddenapi::ShouldDenyAccessToMember(&m, fn_get_access_context, access_method);
+      if (!m_hidden && !m.IsSynthetic()) {
+        // Non-hidden, direct, non-synthetic. Any virtual result could only have been
+        // hidden, therefore this is the best possible match. Exit now.
+        DCHECK((result == nullptr) || result_hidden);
         return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+      } else if (IsMethodPreferredOver(result, result_hidden, &m, m_hidden)) {
+        // Remember as potential result.
+        result = &m;
+        result_hidden = m_hidden;
       }
-      result = &m;  // Remember as potential result.
     }
   }
+
   return result != nullptr
       ? Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
       : nullptr;
@@ -1323,25 +1382,29 @@
     Thread* self,
     ObjPtr<Class> klass,
     ObjPtr<String> name,
-    ObjPtr<ObjectArray<Class>> args);
+    ObjPtr<ObjectArray<Class>> args,
+    const std::function<hiddenapi::AccessContext()>& fn_get_access_context);
 template
 ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
     Thread* self,
     ObjPtr<Class> klass,
     ObjPtr<String> name,
-    ObjPtr<ObjectArray<Class>> args);
+    ObjPtr<ObjectArray<Class>> args,
+    const std::function<hiddenapi::AccessContext()>& fn_get_access_context);
 template
 ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
     Thread* self,
     ObjPtr<Class> klass,
     ObjPtr<String> name,
-    ObjPtr<ObjectArray<Class>> args);
+    ObjPtr<ObjectArray<Class>> args,
+    const std::function<hiddenapi::AccessContext()>& fn_get_access_context);
 template
 ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
     Thread* self,
     ObjPtr<Class> klass,
     ObjPtr<String> name,
-    ObjPtr<ObjectArray<Class>> args);
+    ObjPtr<ObjectArray<Class>> args,
+    const std::function<hiddenapi::AccessContext()>& fn_get_access_context);
 
 template <PointerSize kPointerSize, bool kTransactionActive>
 ObjPtr<Constructor> Class::GetDeclaredConstructorInternal(
@@ -1463,5 +1526,12 @@
 template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
 template void Class::GetAccessFlagsDCheck<kVerifyAll>();
 
+void Class::SetAccessFlagsDCheck(uint32_t new_access_flags) {
+  uint32_t old_access_flags = GetField32<kVerifyNone>(AccessFlagsOffset());
+  // kAccVerificationAttempted is retained.
+  CHECK((old_access_flags & kAccVerificationAttempted) == 0 ||
+        (new_access_flags & kAccVerificationAttempted) != 0);
+}
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 811ee51..c9c542d 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -19,35 +19,43 @@
 
 #include "base/bit_utils.h"
 #include "base/casts.h"
-#include "base/enums.h"
-#include "base/iteration_range.h"
 #include "base/stride_iterator.h"
-#include "base/utils.h"
 #include "class_flags.h"
 #include "class_status.h"
-#include "dex/dex_file.h"
 #include "dex/dex_file_types.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
 #include "gc/allocator_type.h"
-#include "imtable.h"
 #include "object.h"
 #include "object_array.h"
 #include "read_barrier_option.h"
-#include "thread.h"
 
 namespace art {
 
+namespace dex {
+struct ClassDef;
+class TypeList;
+}  // namespace dex
+
+namespace hiddenapi {
+class AccessContext;
+}  // namespace hiddenapi
+
+template<typename T> class ArraySlice;
 class ArtField;
 class ArtMethod;
 struct ClassOffsets;
+class DexFile;
 template<class T> class Handle;
+class ImTable;
 enum InvokeType : uint32_t;
+template <typename Iter> class IterationRange;
 template<typename T> class LengthPrefixedArray;
-template<typename T> class ArraySlice;
+enum class PointerSize : size_t;
 class Signature;
 class StringPiece;
 template<size_t kNumReferences> class PACKED(4) StackHandleScope;
+class Thread;
 
 namespace mirror {
 
@@ -89,7 +97,7 @@
   static void SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  static MemberOffset StatusOffset() {
+  static constexpr MemberOffset StatusOffset() {
     return MemberOffset(OFFSET_OF_OBJECT_MEMBER(Class, status_));
   }
 
@@ -173,7 +181,7 @@
     return GetField32<kVerifyFlags>(AccessFlagsOffset());
   }
 
-  static MemberOffset AccessFlagsOffset() {
+  static constexpr MemberOffset AccessFlagsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
   }
 
@@ -191,8 +199,9 @@
   }
 
   // Returns true if the class is an interface.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsInterface() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (GetAccessFlags() & kAccInterface) != 0;
+    return (GetAccessFlags<kVerifyFlags>() & kAccInterface) != 0;
   }
 
   // Returns true if the class is declared public.
@@ -210,49 +219,44 @@
   }
 
   ALWAYS_INLINE bool ShouldSkipHiddenApiChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (GetAccessFlags() & kAccSkipHiddenApiChecks) != 0;
+    return (GetAccessFlags() & kAccSkipHiddenapiChecks) != 0;
   }
 
   ALWAYS_INLINE void SetSkipHiddenApiChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t flags = GetAccessFlags();
-    SetAccessFlags(flags | kAccSkipHiddenApiChecks);
+    SetAccessFlags(flags | kAccSkipHiddenapiChecks);
   }
 
-  ALWAYS_INLINE void SetRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
-    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-    SetAccessFlags(flags | kAccRecursivelyInitialized);
-  }
+  ALWAYS_INLINE void SetRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetHasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
-    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-    SetAccessFlags(flags | kAccHasDefaultMethod);
-  }
+  ALWAYS_INLINE void SetHasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE void SetFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccClassIsFinalizable);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (GetClassFlags() & kClassFlagString) != 0;
+    return (GetClassFlags<kVerifyFlags>() & kClassFlagString) != 0;
   }
 
   ALWAYS_INLINE void SetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetClassFlags() == kClassFlagClassLoader;
+    return GetClassFlags<kVerifyFlags>() == kClassFlagClassLoader;
   }
 
   ALWAYS_INLINE void SetClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetClassFlags(kClassFlagClassLoader);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (GetClassFlags() & kClassFlagDexCache) != 0;
+    return (GetClassFlags<kVerifyFlags>() & kClassFlagDexCache) != 0;
   }
 
   ALWAYS_INLINE void SetDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -260,8 +264,9 @@
   }
 
   // Returns true if the class is abstract.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsAbstract() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (GetAccessFlags() & kAccAbstract) != 0;
+    return (GetAccessFlags<kVerifyFlags>() & kAccAbstract) != 0;
   }
 
   // Returns true if the class is an annotation.
@@ -324,11 +329,12 @@
 
   // Returns true if this class is the placeholder and should retire and
   // be replaced with a class with the right size for embedded imt/vtable.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ClassStatus s = GetStatus();
+    ClassStatus s = GetStatus<kVerifyFlags>();
     return s < ClassStatus::kResolving &&
            s != ClassStatus::kErrorResolved &&
-           ShouldHaveEmbeddedVTable();
+           ShouldHaveEmbeddedVTable<kVerifyFlags>();
   }
 
   String* GetName() REQUIRES_SHARED(Locks::mutator_lock_);  // Returns the cached name.
@@ -346,7 +352,7 @@
     return (access_flags & kAccClassIsProxy) != 0;
   }
 
-  static MemberOffset PrimitiveTypeOffset() {
+  static constexpr MemberOffset PrimitiveTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_);
   }
 
@@ -416,27 +422,18 @@
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return IsArrayClass<kVerifyFlags>() &&
-        GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
-        IsPrimitive();
-  }
-
   // Depth of class from java.lang.Object
   uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsClassClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset ComponentTypeOffset() {
+  static constexpr MemberOffset ComponentTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
   }
 
@@ -462,37 +459,27 @@
   }
 
   bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return !IsPrimitive() && GetSuperClass() == nullptr;
+    // No read barrier is needed for comparing with null.
+    return !IsPrimitive() && GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr;
   }
 
   bool IsInstantiableNonArray() REQUIRES_SHARED(Locks::mutator_lock_) {
     return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
-        (IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
+    return (!IsPrimitive<kVerifyFlags>() &&
+            !IsInterface<kVerifyFlags>() &&
+            !IsAbstract<kVerifyFlags>()) ||
+        (IsAbstract<kVerifyFlags>() && IsArrayClass<kVerifyFlags>());
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIntArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
-    auto* component_type = GetComponentType<kVerifyFlags>();
-    return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
-  }
-
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLongArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
-    auto* component_type = GetComponentType<kVerifyFlags>();
-    return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
-  }
+  bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Creates a raw object instance but does not invoke the default constructor.
   template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
@@ -504,8 +491,7 @@
   ObjPtr<Object> AllocNonMovableObject(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool IsVariableSize() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -544,13 +530,12 @@
     return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size);
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
-  static MemberOffset ObjectSizeOffset() {
+  static constexpr MemberOffset ObjectSizeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
   }
-  static MemberOffset ObjectSizeAllocFastPathOffset() {
+  static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
   }
 
@@ -558,8 +543,7 @@
 
   void SetObjectSizeAllocFastPath(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t GetObjectSizeAllocFastPath() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetObjectSizeWithoutChecks(uint32_t new_object_size)
@@ -623,7 +607,7 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE Class* GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjPtr<Class> GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get first common super class. It will never return null.
   // `This` and `klass` must be classes.
@@ -632,10 +616,11 @@
   void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetSuperClass() != nullptr;
+    // No read barrier is needed for comparing with null.
+    return GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr;
   }
 
-  static MemberOffset SuperClassOffset() {
+  static constexpr MemberOffset SuperClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
@@ -643,13 +628,14 @@
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template <bool kCheckTransaction = true>
   void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DexCacheOffset() {
+  static constexpr MemberOffset DexCacheOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
   }
 
-  static MemberOffset IfTableOffset() {
+  static constexpr MemberOffset IfTableOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, iftable_));
   }
 
@@ -674,7 +660,7 @@
   ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset MethodsOffset() {
+  static constexpr MemberOffset MethodsOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
   }
 
@@ -720,10 +706,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <PointerSize kPointerSize, bool kTransactionActive>
-  static ObjPtr<Method> GetDeclaredMethodInternal(Thread* self,
-                                                  ObjPtr<Class> klass,
-                                                  ObjPtr<String> name,
-                                                  ObjPtr<ObjectArray<Class>> args)
+  static ObjPtr<Method> GetDeclaredMethodInternal(
+      Thread* self,
+      ObjPtr<Class> klass,
+      ObjPtr<String> name,
+      ObjPtr<ObjectArray<Class>> args,
+      const std::function<hiddenapi::AccessContext()>& fn_get_access_context)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <PointerSize kPointerSize, bool kTransactionActive>
@@ -783,38 +771,35 @@
 
   void SetVTable(ObjPtr<PointerArray> new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset VTableOffset() {
+  static constexpr MemberOffset VTableOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
   }
 
-  static MemberOffset EmbeddedVTableLengthOffset() {
+  static constexpr MemberOffset EmbeddedVTableLengthOffset() {
     return MemberOffset(sizeof(Class));
   }
 
-  static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset ImtPtrOffset(PointerSize pointer_size) {
     return MemberOffset(
         RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
                 static_cast<size_t>(pointer_size)));
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>();
+    return ShouldHaveEmbeddedVTable<kVerifyFlags>();
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool ShouldHaveEmbeddedVTable() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
+    return IsInstantiable<kVerifyFlags>();
   }
 
   bool HasVTable() REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   int32_t GetVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -822,6 +807,7 @@
   ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   int32_t GetEmbeddedVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetEmbeddedVTableLength(int32_t len) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -948,8 +934,7 @@
     return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int32_t GetIfTableCount() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -977,9 +962,10 @@
 
   // Returns the number of instance fields containing reference types. Does not count fields in any
   // super classes.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(IsResolved());
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
+    DCHECK(IsResolved<kVerifyFlags>());
+    return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
   }
 
   uint32_t NumReferenceInstanceFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1005,9 +991,10 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of static fields containing reference types.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(IsResolved());
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
+    DCHECK(IsResolved<kVerifyFlags>());
+    return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
   }
 
   uint32_t NumReferenceStaticFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1021,8 +1008,7 @@
   }
 
   // Get the offset of the first reference static field. Other reference static fields follow.
-  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1150,7 +1136,7 @@
 
   bool DescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE uint32_t NumDirectInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1173,7 +1159,7 @@
 
   const DexFile& GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
+  const dex::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Asserts we are initialized or initializing in the given thread.
   void AssertInitializedOrInitializingInThread(Thread* self)
@@ -1215,7 +1201,8 @@
 
   // Returns true if the class loader is null, ie the class loader is the boot strap class loader.
   bool IsBootStrapClassLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetClassLoader() == nullptr;
+    // No read barrier is needed for comparing with null.
+    return GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr;
   }
 
   static size_t ImTableEntrySize(PointerSize pointer_size) {
@@ -1262,14 +1249,14 @@
   // the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
   // initialized to a copy of *this to prevent issues. Does not visit the ArtMethod and ArtField
   // roots.
-  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
-            typename Visitor>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename Visitor>
   void FixupNativePointers(Class* dest, PointerSize pointer_size, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
+  template <typename T, VerifyObjectFlags kVerifyFlags, typename Visitor>
+  void FixupNativePointer(
+      Class* dest, PointerSize pointer_size, const Visitor& visitor, MemberOffset member_offset)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE static ArraySlice<ArtMethod> GetMethodsSliceRangeUnchecked(
@@ -1322,6 +1309,8 @@
   template<VerifyObjectFlags kVerifyFlags>
   void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void SetAccessFlagsDCheck(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
 
@@ -1426,6 +1415,7 @@
 
   // Tid used to check for recursive <clinit> invocation.
   pid_t clinit_thread_id_;
+  static_assert(sizeof(pid_t) == sizeof(int32_t), "java.lang.Class.clinitThreadId size check");
 
   // ClassDef index in dex file, -1 if no class definition such as an array.
   // TODO: really 16bits
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
index feaac85..8d68dc9 100644
--- a/runtime/mirror/class_ext-inl.h
+++ b/runtime/mirror/class_ext-inl.h
@@ -32,9 +32,7 @@
   }
   int32_t len = arr->GetLength();
   for (int32_t i = 0; i < len; i++) {
-    ArtMethod* method = arr->GetElementPtrSize<ArtMethod*,
-                                               kDefaultVerifyFlags,
-                                               kReadBarrierOption>(i, pointer_size);
+    ArtMethod* method = arr->GetElementPtrSize<ArtMethod*, kDefaultVerifyFlags>(i, pointer_size);
     if (method != nullptr) {
       method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
     }
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 44bf989..146adc9 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -20,12 +20,14 @@
 #include "base/casts.h"
 #include "base/enums.h"
 #include "base/utils.h"
+#include "class-alloc-inl.h"
 #include "class-inl.h"
 #include "class_root.h"
 #include "dex/dex_file-inl.h"
 #include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
-#include "object_array.h"
+#include "object_array-alloc-inl.h"
+#include "object_array-inl.h"
 #include "stack_trace_element.h"
 #include "well_known_classes.h"
 
@@ -117,5 +119,17 @@
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_), bytes);
 }
 
+void ClassExt::SetPreRedefineClassDefIndex(uint16_t index) {
+  DCHECK(!Runtime::Current()->IsActiveTransaction());
+  SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_class_def_index_),
+      static_cast<int32_t>(index));
+}
+
+void ClassExt::SetPreRedefineDexFile(const DexFile* dex_file) {
+  DCHECK(!Runtime::Current()->IsActiveTransaction());
+  SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_dex_file_ptr_),
+      static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file)));
+}
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 612fd0f..126f94a 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -64,6 +64,20 @@
 
   void SetOriginalDexFile(ObjPtr<Object> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  uint16_t GetPreRedefineClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return static_cast<uint16_t>(
+        GetField32(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_class_def_index_)));
+  }
+
+  void SetPreRedefineClassDefIndex(uint16_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  const DexFile* GetPreRedefineDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+        GetField64(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_dex_file_ptr_))));
+  }
+
+  void SetPreRedefineDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_);
+
   void SetObsoleteArrays(ObjPtr<PointerArray> methods, ObjPtr<ObjectArray<DexCache>> dex_caches)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -88,6 +102,10 @@
   // The saved verification error of this class.
   HeapReference<Object> verify_error_;
 
+  // Native pointer to DexFile and ClassDef index of this class before it was JVMTI-redefined.
+  int64_t pre_redefine_dex_file_ptr_;
+  int32_t pre_redefine_class_def_index_;
+
   friend struct art::ClassExtOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
 };
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index 39c8ee0..64b4e74 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -33,7 +33,7 @@
   VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
   if (kVisitClasses) {
     // Visit classes loaded after.
-    ClassTable* const class_table = GetClassTable();
+    ClassTable* const class_table = GetClassTable<kVerifyFlags>();
     if (class_table != nullptr) {
       class_table->VisitRoots(visitor);
     }
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index f25f18f..783ba6a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 #define ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "object_reference.h"
@@ -44,9 +44,10 @@
     return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ClassTable* GetClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<ClassTable*>(
-        GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
+        GetField64<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
   }
 
   void SetClassTable(ClassTable* class_table) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index bbe15ac..47b621a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,12 +27,12 @@
 #include "base/enums.h"
 #include "class_linker.h"
 #include "dex/dex_file.h"
-#include "gc/heap-inl.h"
 #include "gc_root-inl.h"
 #include "mirror/call_site.h"
 #include "mirror/class.h"
 #include "mirror/method_type.h"
 #include "obj_ptr.h"
+#include "object-inl.h"
 #include "runtime.h"
 #include "write_barrier-inl.h"
 
@@ -42,6 +42,27 @@
 namespace mirror {
 
 template <typename T>
+inline DexCachePair<T>::DexCachePair(ObjPtr<T> object, uint32_t index)
+    : object(object), index(index) {}
+
+template <typename T>
+inline void DexCachePair<T>::Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
+  DexCachePair<T> first_elem;
+  first_elem.object = GcRoot<T>(nullptr);
+  first_elem.index = InvalidIndexForSlot(0);
+  dex_cache[0].store(first_elem, std::memory_order_relaxed);
+}
+
+template <typename T>
+inline T* DexCachePair<T>::GetObjectForIndex(uint32_t idx) {
+  if (idx != index) {
+    return nullptr;
+  }
+  DCHECK(!object.IsNull());
+  return object.Read();
+}
+
+template <typename T>
 inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache,
                                               PointerSize pointer_size) {
   NativeDexCachePair<T> first_elem;
@@ -63,6 +84,15 @@
 }
 
 inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+  const uint32_t num_preresolved_strings = NumPreResolvedStrings();
+  if (num_preresolved_strings != 0u) {
+    DCHECK_LT(string_idx.index_, num_preresolved_strings);
+    DCHECK_EQ(num_preresolved_strings, GetDexFile()->NumStringIds());
+    mirror::String* string = GetPreResolvedStrings()[string_idx.index_].Read();
+    if (LIKELY(string != nullptr)) {
+      return string;
+    }
+  }
   return GetStrings()[StringSlotIndex(string_idx)].load(
       std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
 }
@@ -80,6 +110,18 @@
   WriteBarrier::ForEveryFieldWrite(this);
 }
 
+inline void DexCache::SetPreResolvedString(dex::StringIndex string_idx,
+                                           ObjPtr<String> resolved) {
+  DCHECK(resolved != nullptr);
+  DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
+  GetPreResolvedStrings()[string_idx.index_] = GcRoot<mirror::String>(resolved);
+  Runtime* const runtime = Runtime::Current();
+  CHECK(runtime->IsAotCompiler());
+  CHECK(!runtime->IsActiveTransaction());
+  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
+  WriteBarrier::ForEveryFieldWrite(this);
+}
+
 inline void DexCache::ClearString(dex::StringIndex string_idx) {
   DCHECK(Runtime::Current()->IsAotCompiler());
   uint32_t slot_idx = StringSlotIndex(string_idx);
@@ -310,18 +352,25 @@
   // Visit arrays after.
   if (kVisitNativeRoots) {
     VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
-        GetStrings(), NumStrings(), visitor);
+        GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
 
     VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
-        GetResolvedTypes(), NumResolvedTypes(), visitor);
+        GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
 
     VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
-        GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
+        GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
 
-    GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites();
-    for (size_t i = 0, num_call_sites = NumResolvedCallSites(); i != num_call_sites; ++i) {
+    GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
+    size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
+    for (size_t i = 0; i != num_call_sites; ++i) {
       visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
     }
+
+    GcRoot<mirror::String>* const preresolved_strings = GetPreResolvedStrings();
+    const size_t num_preresolved_strings = NumPreResolvedStrings();
+    for (size_t i = 0; i != num_preresolved_strings; ++i) {
+      visitor.VisitRootIfNonNull(preresolved_strings[i].AddressWithoutBarrier());
+    }
   }
 }
 
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 661f954..7e79ebe 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -17,7 +17,6 @@
 #include "dex_cache-inl.h"
 
 #include "art_method-inl.h"
-#include "base/globals.h"
 #include "class_linker.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/heap.h"
@@ -27,6 +26,7 @@
 #include "object.h"
 #include "object_array-inl.h"
 #include "runtime.h"
+#include "runtime_globals.h"
 #include "string.h"
 #include "thread.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
@@ -172,6 +172,21 @@
                   dex_file->NumCallSiteIds());
 }
 
+void DexCache::AddPreResolvedStringsArray() {
+  DCHECK_EQ(NumPreResolvedStrings(), 0u);
+  Thread* const self = Thread::Current();
+  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
+  const size_t num_strings = GetDexFile()->NumStringIds();
+  SetField32<false>(NumPreResolvedStringsOffset(), num_strings);
+  GcRoot<mirror::String>* strings =
+      linear_alloc->AllocArray<GcRoot<mirror::String>>(self, num_strings);
+  CHECK(strings != nullptr);
+  SetPreResolvedStrings(strings);
+  for (size_t i = 0; i < GetDexFile()->NumStringIds(); ++i) {
+    CHECK(GetPreResolvedStrings()[i].Read() == nullptr);
+  }
+}
+
 void DexCache::Init(const DexFile* dex_file,
                     ObjPtr<String> location,
                     StringDexCacheType* strings,
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 8401b66..c742928 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,9 +19,9 @@
 
 #include "array.h"
 #include "base/bit_utils.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
-#include "gc_root-inl.h"
+#include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
 #include "object.h"
 #include "object_array.h"
 
@@ -67,19 +67,12 @@
   // it's always non-null if the id branch succeeds (except for the 0th id).
   // Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
   // the lookup id == stored id branch.
-  DexCachePair(ObjPtr<T> object, uint32_t index)
-      : object(object),
-        index(index) {}
+  DexCachePair(ObjPtr<T> object, uint32_t index);
   DexCachePair() : index(0) {}
   DexCachePair(const DexCachePair<T>&) = default;
   DexCachePair& operator=(const DexCachePair<T>&) = default;
 
-  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
-    DexCachePair<T> first_elem;
-    first_elem.object = GcRoot<T>(nullptr);
-    first_elem.index = InvalidIndexForSlot(0);
-    dex_cache[0].store(first_elem, std::memory_order_relaxed);
-  }
+  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);
 
   static uint32_t InvalidIndexForSlot(uint32_t slot) {
     // Since the cache size is a power of two, 0 will always map to slot 0.
@@ -87,13 +80,7 @@
     return (slot == 0) ? 1u : 0u;
   }
 
-  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (idx != index) {
-      return nullptr;
-    }
-    DCHECK(!object.IsNull());
-    return object.Read();
-  }
+  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
@@ -226,60 +213,76 @@
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
   }
 
-  static MemberOffset StringsOffset() {
+  static constexpr MemberOffset StringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
   }
 
-  static MemberOffset ResolvedTypesOffset() {
+  static constexpr MemberOffset PreResolvedStringsOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(DexCache, preresolved_strings_);
+  }
+
+  static constexpr MemberOffset ResolvedTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
   }
 
-  static MemberOffset ResolvedFieldsOffset() {
+  static constexpr MemberOffset ResolvedFieldsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
   }
 
-  static MemberOffset ResolvedMethodsOffset() {
+  static constexpr MemberOffset ResolvedMethodsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
   }
 
-  static MemberOffset ResolvedMethodTypesOffset() {
+  static constexpr MemberOffset ResolvedMethodTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
   }
 
-  static MemberOffset ResolvedCallSitesOffset() {
+  static constexpr MemberOffset ResolvedCallSitesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
   }
 
-  static MemberOffset NumStringsOffset() {
+  static constexpr MemberOffset NumStringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
   }
 
-  static MemberOffset NumResolvedTypesOffset() {
+  static constexpr MemberOffset NumPreResolvedStringsOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_preresolved_strings_);
+  }
+
+  static constexpr MemberOffset NumResolvedTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
   }
 
-  static MemberOffset NumResolvedFieldsOffset() {
+  static constexpr MemberOffset NumResolvedFieldsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
   }
 
-  static MemberOffset NumResolvedMethodsOffset() {
+  static constexpr MemberOffset NumResolvedMethodsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
   }
 
-  static MemberOffset NumResolvedMethodTypesOffset() {
+  static constexpr MemberOffset NumResolvedMethodTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
   }
 
-  static MemberOffset NumResolvedCallSitesOffset() {
+  static constexpr MemberOffset NumResolvedCallSitesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
   }
 
+  static constexpr size_t PreResolvedStringsAlignment() {
+    return alignof(GcRoot<mirror::String>);
+  }
+
   String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void SetPreResolvedString(dex::StringIndex string_idx,
+                            ObjPtr<mirror::String> resolved)
+      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Clear a string for a string_idx, used to undo string intern transactions to make sure
   // the string isn't kept live.
   void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -326,16 +329,29 @@
   ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
       REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
+    return GetFieldPtr64<StringDexCacheType*, kVerifyFlags>(StringsOffset());
+  }
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  GcRoot<mirror::String>* GetPreResolvedStrings() ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldPtr64<GcRoot<mirror::String>*, kVerifyFlags>(PreResolvedStringsOffset());
   }
 
   void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(StringsOffset(), strings);
   }
 
+  void SetPreResolvedStrings(GcRoot<mirror::String>* strings)
+      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetFieldPtr<false>(PreResolvedStringsOffset(), strings);
+  }
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
+    return GetFieldPtr<TypeDexCacheType*, kVerifyFlags>(ResolvedTypesOffset());
   }
 
   void SetResolvedTypes(TypeDexCacheType* resolved_types)
@@ -364,9 +380,10 @@
     SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   MethodTypeDexCacheType* GetResolvedMethodTypes()
       ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
+    return GetFieldPtr64<MethodTypeDexCacheType*, kVerifyFlags>(ResolvedMethodTypesOffset());
   }
 
   void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
@@ -375,10 +392,11 @@
     SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   GcRoot<CallSite>* GetResolvedCallSites()
       ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetFieldPtr<GcRoot<CallSite>*>(ResolvedCallSitesOffset());
+    return GetFieldPtr<GcRoot<CallSite>*, kVerifyFlags>(ResolvedCallSitesOffset());
   }
 
   void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
@@ -387,28 +405,39 @@
     SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumStringsOffset());
+    return GetField32<kVerifyFlags>(NumStringsOffset());
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  size_t NumPreResolvedStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetField32<kVerifyFlags>(NumPreResolvedStringsOffset());
+  }
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumResolvedTypesOffset());
+    return GetField32<kVerifyFlags>(NumResolvedTypesOffset());
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumResolvedMethodsOffset());
+    return GetField32<kVerifyFlags>(NumResolvedMethodsOffset());
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumResolvedFieldsOffset());
+    return GetField32<kVerifyFlags>(NumResolvedFieldsOffset());
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumResolvedMethodTypesOffset());
+    return GetField32<kVerifyFlags>(NumResolvedMethodTypesOffset());
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(NumResolvedCallSitesOffset());
+    return GetField32<kVerifyFlags>(NumResolvedCallSitesOffset());
   }
 
   const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -432,12 +461,18 @@
                                    NativeDexCachePair<T> pair,
                                    PointerSize ptr_size);
 
+  static size_t PreResolvedStringsSize(size_t num_strings) {
+    return sizeof(GcRoot<mirror::String>) * num_strings;
+  }
+
   uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t MethodTypeSlotIndex(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void AddPreResolvedStringsArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   void Init(const DexFile* dex_file,
             ObjPtr<String> location,
@@ -519,22 +554,25 @@
 #endif
 
   HeapReference<String> location_;
-  // Number of elements in the call_sites_ array. Note that this appears here
-  // because of our packing logic for 32 bit fields.
-  uint32_t num_resolved_call_sites_;
+  // Number of elements in the preresolved_strings_ array. Note that this appears here because of
+  // our packing logic for 32 bit fields.
+  uint32_t num_preresolved_strings_;
 
-  uint64_t dex_file_;               // const DexFile*
-  uint64_t resolved_call_sites_;    // GcRoot<CallSite>* array with num_resolved_call_sites_
-                                    // elements.
-  uint64_t resolved_fields_;        // std::atomic<FieldDexCachePair>*, array with
-                                    // num_resolved_fields_ elements.
-  uint64_t resolved_method_types_;  // std::atomic<MethodTypeDexCachePair>* array with
-                                    // num_resolved_method_types_ elements.
-  uint64_t resolved_methods_;       // ArtMethod*, array with num_resolved_methods_ elements.
-  uint64_t resolved_types_;         // TypeDexCacheType*, array with num_resolved_types_ elements.
-  uint64_t strings_;                // std::atomic<StringDexCachePair>*, array with num_strings_
-                                    // elements.
+  uint64_t dex_file_;                // const DexFile*
+  uint64_t preresolved_strings_;     // GcRoot<mirror::String*> array with num_preresolved_strings
+                                     // elements.
+  uint64_t resolved_call_sites_;     // GcRoot<CallSite>* array with num_resolved_call_sites_
+                                     // elements.
+  uint64_t resolved_fields_;         // std::atomic<FieldDexCachePair>*, array with
+                                     // num_resolved_fields_ elements.
+  uint64_t resolved_method_types_;   // std::atomic<MethodTypeDexCachePair>* array with
+                                     // num_resolved_method_types_ elements.
+  uint64_t resolved_methods_;        // ArtMethod*, array with num_resolved_methods_ elements.
+  uint64_t resolved_types_;          // TypeDexCacheType*, array with num_resolved_types_ elements.
+  uint64_t strings_;                 // std::atomic<StringDexCachePair>*, array with num_strings_
+                                     // elements.
 
+  uint32_t num_resolved_call_sites_;    // Number of elements in the call_sites_ array.
   uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
   uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
   uint32_t num_resolved_methods_;       // Number of elements in the resolved_methods_ array.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e9e7ca8..f7c1c02 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -108,7 +108,7 @@
   EXPECT_NE(klass1->NumStaticFields(), 0u);
   for (ArtField& field : klass2->GetSFields()) {
     EXPECT_FALSE(
-        klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false>(
+        klass1->ResolvedFieldAccessTest</*throw_on_failure=*/ false>(
             klass2.Get(),
             &field,
             klass1->GetDexCache(),
@@ -146,8 +146,8 @@
   Handle<mirror::DexCache> dex_cache = hs.NewHandle(
       class_linker_->FindDexCache(Thread::Current(), dex_file));
 
-  const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
-  const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+  const dex::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+  const dex::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
   Handle<mirror::MethodType> method1_type = hs.NewHandle(
       class_linker_->ResolveMethodType(soa.Self(),
                                        method1_id.proto_idx_,
diff --git a/runtime/mirror/emulated_stack_frame.cc b/runtime/mirror/emulated_stack_frame.cc
index ce39049..001469c 100644
--- a/runtime/mirror/emulated_stack_frame.cc
+++ b/runtime/mirror/emulated_stack_frame.cc
@@ -16,11 +16,15 @@
 
 #include "emulated_stack_frame.h"
 
-#include "class-inl.h"
+#include "array-alloc-inl.h"
+#include "array-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 #include "jvalue-inl.h"
 #include "method_handles-inl.h"
 #include "method_handles.h"
+#include "object_array-alloc-inl.h"
+#include "object_array-inl.h"
 #include "reflection-inl.h"
 
 namespace art {
diff --git a/runtime/mirror/executable.cc b/runtime/mirror/executable.cc
index fac3319..24e2047 100644
--- a/runtime/mirror/executable.cc
+++ b/runtime/mirror/executable.cc
@@ -38,18 +38,6 @@
 template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(ArtMethod* method);
 template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(ArtMethod* method);
 
-ArtMethod* Executable::GetArtMethod() {
-  return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
-}
-
-template <bool kTransactionActive>
-void Executable::SetArtMethod(ArtMethod* method) {
-  SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
-}
-
-template void Executable::SetArtMethod<false>(ArtMethod* method);
-template void Executable::SetArtMethod<true>(ArtMethod* method);
-
 mirror::Class* Executable::GetDeclaringClass() {
   return GetFieldObject<mirror::Class>(DeclaringClassOffset());
 }
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index bf66d79..14c9d4c 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_MIRROR_EXECUTABLE_H_
 
 #include "accessible_object.h"
-#include "object.h"
+#include "object-inl.h"
 #include "read_barrier_option.h"
 
 namespace art {
@@ -36,10 +36,19 @@
   bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
-  // Only used by the image writer.
-  template <bool kTransactionActive = false>
-  void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reinterpret_cast64<ArtMethod*>(GetField64<kVerifyFlags>(ArtMethodOffset()));
+  }
+
+  template <bool kTransactionActive = false,
+            bool kCheckTransaction = true,
+            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+        ArtMethodOffset(), reinterpret_cast64<uint64_t>(method));
+  }
+
   mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 2e263b9..803b880 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -20,7 +20,7 @@
 #include "field.h"
 
 #include "art_field-inl.h"
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 #include "dex_cache-inl.h"
 
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 9e3c9af..3d4c5a7 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -39,9 +39,15 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  PointerArray* GetMethodArrayOrNull(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return down_cast<PointerArray*>(
+        Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
+  }
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   PointerArray* GetMethodArray(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
-    auto* method_array = down_cast<PointerArray*>(Get<kVerifyFlags, kReadBarrierOption>(
-        (i * kMax) + kMethodArray));
+    PointerArray* method_array = GetMethodArrayOrNull<kVerifyFlags, kReadBarrierOption>(i);
     DCHECK(method_array != nullptr);
     return method_array;
   }
@@ -49,9 +55,8 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   size_t GetMethodArrayCount(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
-    auto* method_array = down_cast<PointerArray*>(
-        Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
-    return method_array == nullptr ? 0u : method_array->GetLength();
+    PointerArray* method_array = GetMethodArrayOrNull<kVerifyFlags, kReadBarrierOption>(i);
+    return method_array == nullptr ? 0u : method_array->GetLength<kVerifyFlags>();
   }
 
   void SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 910a1fc..d7a1225 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -18,7 +18,7 @@
 
 #include "art_method.h"
 #include "class_root.h"
-#include "mirror/class-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/object-inl.h"
 #include "obj_ptr-inl.h"
 
diff --git a/runtime/mirror/method_handle_impl.cc b/runtime/mirror/method_handle_impl.cc
index 88ccbc9..e8cacd9 100644
--- a/runtime/mirror/method_handle_impl.cc
+++ b/runtime/mirror/method_handle_impl.cc
@@ -16,7 +16,7 @@
 
 #include "method_handle_impl-inl.h"
 
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 
 namespace art {
diff --git a/runtime/mirror/method_handles_lookup.cc b/runtime/mirror/method_handles_lookup.cc
index d1e7a6d..de17c8d 100644
--- a/runtime/mirror/method_handles_lookup.cc
+++ b/runtime/mirror/method_handles_lookup.cc
@@ -16,7 +16,7 @@
 
 #include "method_handles_lookup.h"
 
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 #include "dex/modifiers.h"
 #include "handle_scope.h"
diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc
index bc62ebd..6533656 100644
--- a/runtime/mirror/method_type.cc
+++ b/runtime/mirror/method_type.cc
@@ -16,9 +16,11 @@
 
 #include "method_type.h"
 
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class_root.h"
 #include "method_handles.h"
+#include "object_array-alloc-inl.h"
+#include "object_array-inl.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/mirror/method_type.h b/runtime/mirror/method_type.h
index 014b211..9cceff9 100644
--- a/runtime/mirror/method_type.h
+++ b/runtime/mirror/method_type.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_MIRROR_METHOD_TYPE_H_
 #define ART_RUNTIME_MIRROR_METHOD_TYPE_H_
 
-#include "base/utils.h"
 #include "object_array.h"
 #include "object.h"
 #include "string.h"
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index 2bdea72f..a8411d9 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -25,6 +25,7 @@
 #include "class_root.h"
 #include "common_runtime_test.h"
 #include "handle_scope-inl.h"
+#include "object_array-alloc-inl.h"
 #include "object_array-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 40832bc..2c2ad9b 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -80,11 +80,11 @@
 }
 
 inline mirror::Object* Object::MonitorEnter(Thread* self) {
-  return Monitor::MonitorEnter(self, this, /*trylock*/false);
+  return Monitor::MonitorEnter(self, this, /*trylock=*/false);
 }
 
 inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
-  return Monitor::MonitorEnter(self, this, /*trylock*/true);
+  return Monitor::MonitorEnter(self, this, /*trylock=*/true);
 }
 
 inline bool Object::MonitorExit(Thread* self) {
@@ -137,38 +137,42 @@
   return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsClass() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
-      template GetClass<kVerifyFlags, kReadBarrierOption>();
-  return GetClass<kNewFlags, kReadBarrierOption>() == java_lang_Class;
+  // OK to look at from-space copies since java.lang.Class.class is not movable.
+  // See b/114413743
+  ObjPtr<Class> klass = GetClass<kVerifyFlags, kWithoutReadBarrier>();
+  ObjPtr<Class> java_lang_Class = klass->GetClass<kVerifyFlags, kWithoutReadBarrier>();
+  return klass == java_lang_Class;
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline Class* Object::AsClass() {
-  DCHECK((IsClass<kVerifyFlags, kReadBarrierOption>()));
+  DCHECK((IsClass<kVerifyFlags>()));
   return down_cast<Class*>(this);
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsObjectArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  return IsArrayInstance<kVerifyFlags, kReadBarrierOption>() &&
-      !GetClass<kNewFlags, kReadBarrierOption>()->
-          template GetComponentType<kNewFlags, kReadBarrierOption>()->IsPrimitive();
+  // We do not need a read barrier here as the primitive type is constant,
+  // both from-space and to-space component type classes shall yield the same result.
+  constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+  return IsArrayInstance<kVerifyFlags>() &&
+      !GetClass<kNewFlags, kWithoutReadBarrier>()->
+          template GetComponentType<kNewFlags, kWithoutReadBarrier>()->IsPrimitive();
 }
 
-template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<class T, VerifyObjectFlags kVerifyFlags>
 inline ObjectArray<T>* Object::AsObjectArray() {
-  DCHECK((IsObjectArray<kVerifyFlags, kReadBarrierOption>()));
+  DCHECK((IsObjectArray<kVerifyFlags>()));
   return down_cast<ObjectArray<T>*>(this);
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsArrayInstance() {
-  return GetClass<kVerifyFlags, kReadBarrierOption>()->
-      template IsArrayClass<kVerifyFlags, kReadBarrierOption>();
+  // We do not need a read barrier here, both from-space and to-space version of the class
+  // shall return the same result from IsArrayClass().
+  return GetClass<kVerifyFlags, kWithoutReadBarrier>()->template IsArrayClass<kVerifyFlags>();
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -182,119 +186,108 @@
   return down_cast<Reference*>(this);
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline Array* Object::AsArray() {
-  DCHECK((IsArrayInstance<kVerifyFlags, kReadBarrierOption>()));
+  DCHECK((IsArrayInstance<kVerifyFlags>()));
   return down_cast<Array*>(this);
 }
 
+template<VerifyObjectFlags kVerifyFlags, Primitive::Type kType>
+ALWAYS_INLINE bool Object::IsSpecificPrimitiveArray() {
+  // We do not need a read barrier here as the primitive type is constant,
+  // both from-space and to-space component type classes shall yield the same result.
+  ObjPtr<Class> klass = GetClass<kVerifyFlags, kWithoutReadBarrier>();
+  constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+  ObjPtr<Class> const component_type = klass->GetComponentType<kNewFlags, kWithoutReadBarrier>();
+  return component_type != nullptr &&
+         component_type->GetPrimitiveType<kNewFlags>() == kType;
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsBooleanArray() {
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimBoolean>();
+}
+
 template<VerifyObjectFlags kVerifyFlags>
 inline BooleanArray* Object::AsBooleanArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->GetComponentType()->IsPrimitiveBoolean());
+  DCHECK(IsBooleanArray<kVerifyFlags>());
   return down_cast<BooleanArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsByteArray() {
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimByte>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
 inline ByteArray* Object::AsByteArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte());
+  DCHECK(IsByteArray<kVerifyFlags>());
   return down_cast<ByteArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
-inline ByteArray* Object::AsByteSizedArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() ||
-         GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean());
-  return down_cast<ByteArray*>(this);
+inline bool Object::IsCharArray() {
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimChar>();
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline CharArray* Object::AsCharArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
+  DCHECK(IsCharArray<kVerifyFlags>());
   return down_cast<CharArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsShortArray() {
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimShort>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
 inline ShortArray* Object::AsShortArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort());
+  DCHECK(IsShortArray<kVerifyFlags>());
   return down_cast<ShortArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
-inline ShortArray* Object::AsShortSizedArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort() ||
-         GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
-  return down_cast<ShortArray*>(this);
-}
-
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline bool Object::IsIntArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
-  ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
-  return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimInt>();
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline IntArray* Object::AsIntArray() {
-  DCHECK((IsIntArray<kVerifyFlags, kReadBarrierOption>()));
+  DCHECK((IsIntArray<kVerifyFlags>()));
   return down_cast<IntArray*>(this);
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsLongArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
-  ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
-  return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimLong>();
 }
 
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
 inline LongArray* Object::AsLongArray() {
-  DCHECK((IsLongArray<kVerifyFlags, kReadBarrierOption>()));
+  DCHECK((IsLongArray<kVerifyFlags>()));
   return down_cast<LongArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsFloatArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
-  return component_type != nullptr && component_type->template IsPrimitiveFloat<kNewFlags>();
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimFloat>();
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline FloatArray* Object::AsFloatArray() {
   DCHECK(IsFloatArray<kVerifyFlags>());
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
   return down_cast<FloatArray*>(this);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsDoubleArray() {
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
-  return component_type != nullptr && component_type->template IsPrimitiveDouble<kNewFlags>();
+  return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimDouble>();
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline DoubleArray* Object::AsDoubleArray() {
   DCHECK(IsDoubleArray<kVerifyFlags>());
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
-  DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
   return down_cast<DoubleArray*>(this);
 }
 
@@ -347,15 +340,15 @@
   // values is OK because of that.
   static constexpr ReadBarrierOption kRBO = kWithoutReadBarrier;
   size_t result;
-  constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
-  if (IsArrayInstance<kVerifyFlags, kRBO>()) {
-    result = AsArray<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
-  } else if (IsClass<kNewFlags, kRBO>()) {
-    result = AsClass<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
+  constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+  if (IsArrayInstance<kVerifyFlags>()) {
+    result = AsArray<kNewFlags>()->template SizeOf<kNewFlags, kRBO>();
+  } else if (IsClass<kNewFlags>()) {
+    result = AsClass<kNewFlags>()->template SizeOf<kNewFlags, kRBO>();
   } else if (GetClass<kNewFlags, kRBO>()->IsStringClass()) {
     result = AsString<kNewFlags, kRBO>()->template SizeOf<kNewFlags>();
   } else {
-    result = GetClass<kNewFlags, kRBO>()->template GetObjectSize<kNewFlags, kRBO>();
+    result = GetClass<kNewFlags, kRBO>()->template GetObjectSize<kNewFlags>();
   }
   DCHECK_GE(result, sizeof(Object)) << " class=" << Class::PrettyClass(GetClass<kNewFlags, kRBO>());
   return result;
@@ -584,6 +577,10 @@
   return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
 }
 
+/*
+ * Returns a pointer to an object representing what the field points to, not an
+ * object representing the field.
+ */
 template<class T,
          VerifyObjectFlags kVerifyFlags,
          ReadBarrierOption kReadBarrierOption,
@@ -730,7 +727,7 @@
 inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
                                                   ObjPtr<Object> new_value) {
   VerifyTransaction<kTransactionActive, kCheckTransaction>();
-  VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr);
+  VerifyCAS<kVerifyFlags>(new_value, /*old_value=*/ nullptr);
 
   uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
@@ -867,7 +864,7 @@
     // inheritance hierarchy and find reference offsets the hard way. In the static case, just
     // consider this class.
     for (ObjPtr<Class> klass = kIsStatic
-            ? AsClass<kVerifyFlags, kReadBarrierOption>()
+            ? AsClass<kVerifyFlags>()
             : GetClass<kVerifyFlags, kReadBarrierOption>();
         klass != nullptr;
         klass = kIsStatic ? nullptr : klass->GetSuperClass<kVerifyFlags, kReadBarrierOption>()) {
@@ -879,7 +876,7 @@
       // Presumably GC can happen when we are cross compiling, it should not cause performance
       // problems to do pointer size logic.
       MemberOffset field_offset = kIsStatic
-          ? klass->GetFirstReferenceStaticFieldOffset<kVerifyFlags, kReadBarrierOption>(
+          ? klass->GetFirstReferenceStaticFieldOffset<kVerifyFlags>(
               Runtime::Current()->GetClassLinker()->GetImagePointerSize())
           : klass->GetFirstReferenceInstanceFieldOffset<kVerifyFlags, kReadBarrierOption>();
       for (size_t i = 0u; i < num_reference_fields; ++i) {
@@ -902,13 +899,13 @@
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
 inline void Object::VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
-  DCHECK(!klass->IsTemp());
+  DCHECK(!klass->IsTemp<kVerifyFlags>());
   klass->VisitFieldsReferences<true, kVerifyFlags, kReadBarrierOption>(0, visitor);
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline bool Object::IsClassLoader() {
-  return GetClass<kVerifyFlags, kReadBarrierOption>()->IsClassLoaderClass();
+  return GetClass<kVerifyFlags, kReadBarrierOption>()->template IsClassLoaderClass<kVerifyFlags>();
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -919,7 +916,7 @@
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline bool Object::IsDexCache() {
-  return GetClass<kVerifyFlags, kReadBarrierOption>()->IsDexCacheClass();
+  return GetClass<kVerifyFlags, kReadBarrierOption>()->template IsDexCacheClass<kVerifyFlags>();
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 8689e4d..ee84997 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -131,7 +131,7 @@
     UNREACHABLE();
   }
   DCHECK(kUseBakerReadBarrier);
-  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
+  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
   uint32_t rb_state = lw.ReadBarrierState();
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   return rb_state;
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index 39e32bf..f0bee5a 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -33,27 +33,27 @@
           typename JavaLangRefVisitor>
 inline void Object::VisitReferences(const Visitor& visitor,
                                     const JavaLangRefVisitor& ref_visitor) {
+  visitor(this, ClassOffset(), /* is_static= */ false);
   ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
-  visitor(this, ClassOffset(), false);
   const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
   if (LIKELY(class_flags == kClassFlagNormal)) {
-    DCHECK((!klass->IsVariableSize<kVerifyFlags, kReadBarrierOption>()));
+    DCHECK((!klass->IsVariableSize<kVerifyFlags>()));
     VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
-    DCHECK((!klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
-    DCHECK(!klass->IsStringClass());
-    DCHECK(!klass->IsClassLoaderClass());
-    DCHECK((!klass->IsArrayClass<kVerifyFlags, kReadBarrierOption>()));
+    DCHECK((!klass->IsClassClass<kVerifyFlags>()));
+    DCHECK(!klass->IsStringClass<kVerifyFlags>());
+    DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
+    DCHECK((!klass->IsArrayClass<kVerifyFlags>()));
   } else {
     if ((class_flags & kClassFlagNoReferenceFields) == 0) {
-      DCHECK(!klass->IsStringClass());
+      DCHECK(!klass->IsStringClass<kVerifyFlags>());
       if (class_flags == kClassFlagClass) {
-        DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
-        ObjPtr<Class> as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+        DCHECK((klass->IsClassClass<kVerifyFlags>()));
+        ObjPtr<Class> as_klass = AsClass<kVerifyNone>();
         as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
                                                                                        visitor);
       } else if (class_flags == kClassFlagObjectArray) {
-        DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
-        AsObjectArray<mirror::Object, kVerifyNone, kReadBarrierOption>()->VisitReferences(visitor);
+        DCHECK((klass->IsObjectArrayClass<kVerifyFlags>()));
+        AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
       } else if ((class_flags & kClassFlagReference) != 0) {
         VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
         ref_visitor(klass, AsReference<kVerifyFlags, kReadBarrierOption>());
@@ -69,15 +69,16 @@
                                       kReadBarrierOption>(klass, visitor);
       }
     } else if (kIsDebugBuild) {
-      CHECK((!klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
-      CHECK((!klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+      CHECK((!klass->IsClassClass<kVerifyFlags>()));
+      CHECK((!klass->IsObjectArrayClass<kVerifyFlags>()));
       // String still has instance fields for reflection purposes but these don't exist in
       // actual string instances.
-      if (!klass->IsStringClass()) {
+      if (!klass->IsStringClass<kVerifyFlags>()) {
         size_t total_reference_instance_fields = 0;
         ObjPtr<Class> super_class = klass;
         do {
-          total_reference_instance_fields += super_class->NumReferenceInstanceFields();
+          total_reference_instance_fields +=
+              super_class->NumReferenceInstanceFields<kVerifyFlags>();
           super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
         } while (super_class != nullptr);
         // The only reference field should be the object's class. This field is handled at the
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 025c10b..f6adc80 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -27,7 +27,7 @@
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
 #include "gc/accounting/card_table-inl.h"
-#include "gc/heap.h"
+#include "gc/heap-inl.h"
 #include "handle_scope-inl.h"
 #include "iftable-inl.h"
 #include "monitor.h"
@@ -225,11 +225,10 @@
       }
       default: {
         LOG(FATAL) << "Invalid state during hashcode " << lw.GetState();
-        break;
+        UNREACHABLE();
       }
     }
   }
-  UNREACHABLE();
 }
 
 void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 35946d7..ba222f6 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -20,12 +20,13 @@
 #include "base/atomic.h"
 #include "base/casts.h"
 #include "base/enums.h"
-#include "base/globals.h"
+#include "dex/primitive.h"
 #include "obj_ptr.h"
 #include "object_reference.h"
 #include "offsets.h"
 #include "read_barrier_config.h"
 #include "read_barrier_option.h"
+#include "runtime_globals.h"
 #include "verify_object.h"
 
 namespace art {
@@ -86,7 +87,7 @@
     return sizeof(Object);
   }
 
-  static MemberOffset ClassOffset() {
+  static constexpr MemberOffset ClassOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
   }
 
@@ -138,7 +139,7 @@
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
-  static MemberOffset MonitorOffset() {
+  static constexpr MemberOffset MonitorOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
   }
 
@@ -169,19 +170,14 @@
   void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
   void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<class T,
-           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -198,39 +194,39 @@
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -764,6 +760,9 @@
                             size_t num_bytes)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template<VerifyObjectFlags kVerifyFlags, Primitive::Type kType>
+  bool IsSpecificPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
   static Atomic<uint32_t> hash_code_seed;
 
   // The Class representing the type of the object.
diff --git a/runtime/mirror/object_array-alloc-inl.h b/runtime/mirror/object_array-alloc-inl.h
new file mode 100644
index 0000000..8e96d9f
--- /dev/null
+++ b/runtime/mirror/object_array-alloc-inl.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_OBJECT_ARRAY_ALLOC_INL_H_
+#define ART_RUNTIME_MIRROR_OBJECT_ARRAY_ALLOC_INL_H_
+
+#include "object_array.h"
+
+#include "array-alloc-inl.h"
+#include "array-inl.h"
+#include "class.h"
+#include "dex/primitive.h"
+#include "gc/heap-inl.h"
+#include "handle_scope-inl.h"
+#include "obj_ptr-inl.h"
+#include "object-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+
+template<class T>
+inline ObjPtr<ObjectArray<T>> ObjectArray<T>::Alloc(Thread* self,
+                                                    ObjPtr<Class> object_array_class,
+                                                    int32_t length,
+                                                    gc::AllocatorType allocator_type) {
+  ObjPtr<Array> array = Array::Alloc<true>(self,
+                                           object_array_class,
+                                           length,
+                                           ComponentSizeShiftWidth(kHeapReferenceSize),
+                                           allocator_type);
+  if (UNLIKELY(array == nullptr)) {
+    return nullptr;
+  }
+  DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
+            ComponentSizeShiftWidth(kHeapReferenceSize));
+  return array->AsObjectArray<T>();
+}
+
+template<class T>
+inline ObjPtr<ObjectArray<T>> ObjectArray<T>::Alloc(Thread* self,
+                                                    ObjPtr<Class> object_array_class,
+                                                    int32_t length) {
+  return Alloc(self,
+               object_array_class,
+               length,
+               Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+template<class T>
+inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
+  DCHECK_GE(new_length, 0);
+  // We may get copied by a compacting GC.
+  StackHandleScope<1> hs(self);
+  Handle<ObjectArray<T>> h_this(hs.NewHandle(this));
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
+      heap->GetCurrentNonMovingAllocator();
+  ObjPtr<ObjectArray<T>> new_array = Alloc(self, GetClass(), new_length, allocator_type);
+  if (LIKELY(new_array != nullptr)) {
+    new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
+  }
+  return new_array;
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_OBJECT_ARRAY_ALLOC_INL_H_
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 1d2f47f..b984474 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -24,50 +24,19 @@
 #include "android-base/stringprintf.h"
 
 #include "array-inl.h"
-#include "base/utils.h"
 #include "class.h"
-#include "gc/heap.h"
-#include "handle_scope-inl.h"
 #include "obj_ptr-inl.h"
 #include "object-inl.h"
 #include "runtime.h"
-#include "thread.h"
+#include "thread-current-inl.h"
 #include "write_barrier-inl.h"
 
 namespace art {
 namespace mirror {
 
-template<class T>
-inline ObjPtr<ObjectArray<T>> ObjectArray<T>::Alloc(Thread* self,
-                                                    ObjPtr<Class> object_array_class,
-                                                    int32_t length,
-                                                    gc::AllocatorType allocator_type) {
-  ObjPtr<Array> array = Array::Alloc<true>(self,
-                                           object_array_class,
-                                           length,
-                                           ComponentSizeShiftWidth(kHeapReferenceSize),
-                                           allocator_type);
-  if (UNLIKELY(array == nullptr)) {
-    return nullptr;
-  }
-  DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
-            ComponentSizeShiftWidth(kHeapReferenceSize));
-  return array->AsObjectArray<T>();
-}
-
-template<class T>
-inline ObjPtr<ObjectArray<T>> ObjectArray<T>::Alloc(Thread* self,
-                                                    ObjPtr<Class> object_array_class,
-                                                    int32_t length) {
-  return Alloc(self,
-               object_array_class,
-               length,
-               Runtime::Current()->GetHeap()->GetCurrentAllocator());
-}
-
 template<class T> template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline T* ObjectArray<T>::Get(int32_t i) {
-  if (!CheckIsValidIndex(i)) {
+  if (!CheckIsValidIndex<kVerifyFlags>(i)) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
@@ -348,22 +317,6 @@
 }
 
 template<class T>
-inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
-  DCHECK_GE(new_length, 0);
-  // We may get copied by a compacting GC.
-  StackHandleScope<1> hs(self);
-  Handle<ObjectArray<T>> h_this(hs.NewHandle(this));
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
-      heap->GetCurrentNonMovingAllocator();
-  ObjPtr<ObjectArray<T>> new_array = Alloc(self, GetClass(), new_length, allocator_type);
-  if (LIKELY(new_array != nullptr)) {
-    new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
-  }
-  return new_array;
-}
-
-template<class T>
 inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
   return MemberOffset(DataOffset(kHeapReferenceSize).Int32Value() + (i * kHeapReferenceSize));
 }
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 295b460..780d662 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -24,17 +24,26 @@
 namespace art {
 namespace mirror {
 
-template<bool kPoisonReferences, class MirrorType>
+template <bool kPoisonReferences, class MirrorType>
+ALWAYS_INLINE
 void ObjectReference<kPoisonReferences, MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
   Assign(ptr.Ptr());
 }
 
-template<class MirrorType>
+template <class MirrorType>
+ALWAYS_INLINE
 bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
   return reference_.CompareAndSetWeakRelaxed(Compression::Compress(expected_ptr),
                                              Compression::Compress(new_ptr));
 }
 
+template <typename MirrorType>
+template <bool kIsVolatile>
+ALWAYS_INLINE
+void HeapReference<MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
+  Assign<kIsVolatile>(ptr.Ptr());
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 77154e2..e19e165 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -18,10 +18,10 @@
 #define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
 
 #include "base/atomic.h"
-#include "base/globals.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "heap_poisoning.h"
 #include "obj_ptr.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace mirror {
@@ -60,6 +60,15 @@
   using Compression = PtrCompression<kPoisonReferences, MirrorType>;
 
  public:
+  /*
+   * Returns a pointer to the mirror of the managed object this reference is for.
+   *
+   * This does NOT return the current object (which isn't derived from, and
+   * therefor cannot be a mirror::Object) as a mirror pointer.  Instead, this
+   * returns a pointer to the mirror of the managed object this refers to.
+   *
+   * TODO (chriswailes): Rename to GetPtr().
+   */
   MirrorType* AsMirrorPtr() const {
     return Compression::Decompress(reference_);
   }
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 0b615a6..27987c0 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -20,11 +20,13 @@
 #include <stdio.h>
 #include <memory>
 
+#include "array-alloc-inl.h"
 #include "array-inl.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "asm_support.h"
 #include "base/enums.h"
+#include "class-alloc-inl.h"
 #include "class-inl.h"
 #include "class_linker-inl.h"
 #include "class_linker.h"
@@ -38,6 +40,7 @@
 #include "iftable-inl.h"
 #include "obj_ptr.h"
 #include "object-inl.h"
+#include "object_array-alloc-inl.h"
 #include "object_array-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "string-inl.h"
@@ -67,8 +70,6 @@
     ASSERT_EQ(string->IsValueNull(), false);
     // strlen is necessary because the 1-character string "\x00\x00" is interpreted as ""
     ASSERT_TRUE(string->Equals(utf8_in) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
-    ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) ||
-                (expected_utf16_length == 1 && strlen(utf8_in) == 0));
     for (int32_t i = 0; i < expected_utf16_length; i++) {
       EXPECT_EQ(utf16_expected[i], string->CharAt(i));
     }
@@ -204,7 +205,7 @@
 template<typename ArrayT>
 void TestPrimitiveArray(ClassLinker* cl) {
   ScopedObjectAccess soa(Thread::Current());
-  typedef typename ArrayT::ElementType T;
+  using T = typename ArrayT::ElementType;
 
   StackHandleScope<2> hs(soa.Self());
   Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
@@ -252,9 +253,9 @@
 }
 
 TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
-  typedef DoubleArray ArrayT;
+  using ArrayT = DoubleArray;
   ScopedObjectAccess soa(Thread::Current());
-  typedef typename ArrayT::ElementType T;
+  using T = typename ArrayT::ElementType;
 
   StackHandleScope<2> hs(soa.Self());
   Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
@@ -283,9 +284,9 @@
 }
 
 TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) {
-  typedef FloatArray ArrayT;
+  using ArrayT = FloatArray;
   ScopedObjectAccess soa(Thread::Current());
-  typedef typename ArrayT::ElementType T;
+  using T = typename ArrayT::ElementType;
 
   StackHandleScope<2> hs(soa.Self());
   Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
@@ -361,16 +362,16 @@
   Handle<Class> klass =
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader));
   ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
-  const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
+  const dex::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
   ASSERT_TRUE(klass_type_id != nullptr);
 
-  const DexFile::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
+  const dex::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
   ASSERT_TRUE(type_type_id != nullptr);
 
-  const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
+  const dex::StringId* name_str_id = dex_file->FindStringId("s0");
   ASSERT_TRUE(name_str_id != nullptr);
 
-  const DexFile::FieldId* field_id = dex_file->FindFieldId(
+  const dex::FieldId* field_id = dex_file->FindFieldId(
       *klass_type_id, *name_str_id, *type_type_id);
   ASSERT_TRUE(field_id != nullptr);
   uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 63c5ae5..9ace4f7 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,8 +18,8 @@
 #define ART_RUNTIME_MIRROR_REFERENCE_H_
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "read_barrier_option.h"
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index 5a7575a..01f2d76 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -16,7 +16,7 @@
 
 #include "stack_trace_element.h"
 
-#include "class-inl.h"
+#include "class-alloc-inl.h"
 #include "class.h"
 #include "class_root.h"
 #include "gc/accounting/card_table-inl.h"
diff --git a/runtime/mirror/string-alloc-inl.h b/runtime/mirror/string-alloc-inl.h
new file mode 100644
index 0000000..4c4e2af
--- /dev/null
+++ b/runtime/mirror/string-alloc-inl.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
+#define ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
+
+#include "string-inl.h"
+
+#include "android-base/stringprintf.h"
+
+#include "array.h"
+#include "base/bit_utils.h"
+#include "class.h"
+#include "class_root.h"
+#include "gc/heap-inl.h"
+#include "runtime.h"
+#include "runtime_globals.h"
+#include "thread.h"
+
+namespace art {
+namespace mirror {
+
+// Sets string count in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountVisitor {
+ public:
+  explicit SetStringCountVisitor(int32_t count) : count_(count) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsString as object is not yet in live bitmap or allocation stack.
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+    string->SetCount(count_);
+    DCHECK(!string->IsCompressed() || kUseStringCompression);
+  }
+
+ private:
+  const int32_t count_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndBytesVisitor {
+ public:
+  SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
+                                int32_t high_byte)
+      : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsString as object is not yet in live bitmap or allocation stack.
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+    string->SetCount(count_);
+    DCHECK(!string->IsCompressed() || kUseStringCompression);
+    int32_t length = String::GetLengthFromCount(count_);
+    const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
+    if (string->IsCompressed()) {
+      uint8_t* valueCompressed = string->GetValueCompressed();
+      for (int i = 0; i < length; i++) {
+        valueCompressed[i] = (src[i] & 0xFF);
+      }
+    } else {
+      uint16_t* value = string->GetValue();
+      for (int i = 0; i < length; i++) {
+        value[i] = high_byte_ + (src[i] & 0xFF);
+      }
+    }
+  }
+
+ private:
+  const int32_t count_;
+  Handle<ByteArray> src_array_;
+  const int32_t offset_;
+  const int32_t high_byte_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromCharArray {
+ public:
+  SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
+                                             int32_t offset) :
+    count_(count), src_array_(src_array), offset_(offset) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsString as object is not yet in live bitmap or allocation stack.
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+    string->SetCount(count_);
+    const uint16_t* const src = src_array_->GetData() + offset_;
+    const int32_t length = String::GetLengthFromCount(count_);
+    if (kUseStringCompression && String::IsCompressed(count_)) {
+      for (int i = 0; i < length; ++i) {
+        string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
+      }
+    } else {
+      memcpy(string->GetValue(), src, length * sizeof(uint16_t));
+    }
+  }
+
+ private:
+  const int32_t count_;
+  Handle<CharArray> src_array_;
+  const int32_t offset_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromString {
+ public:
+  SetStringCountAndValueVisitorFromString(int32_t count,
+                                          Handle<String> src_string,
+                                          int32_t offset) :
+    count_(count), src_string_(src_string), offset_(offset) {
+  }
+
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid AsString as object is not yet in live bitmap or allocation stack.
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+    string->SetCount(count_);
+    const int32_t length = String::GetLengthFromCount(count_);
+    bool compressible = kUseStringCompression && String::IsCompressed(count_);
+    if (src_string_->IsCompressed()) {
+      const uint8_t* const src = src_string_->GetValueCompressed() + offset_;
+      memcpy(string->GetValueCompressed(), src, length * sizeof(uint8_t));
+    } else {
+      const uint16_t* const src = src_string_->GetValue() + offset_;
+      if (compressible) {
+        for (int i = 0; i < length; ++i) {
+          string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
+        }
+      } else {
+        memcpy(string->GetValue(), src, length * sizeof(uint16_t));
+      }
+    }
+  }
+
+ private:
+  const int32_t count_;
+  Handle<String> src_string_;
+  const int32_t offset_;
+};
+
+template <bool kIsInstrumented, typename PreFenceVisitor>
+inline String* String::Alloc(Thread* self,
+                             int32_t utf16_length_with_flag,
+                             gc::AllocatorType allocator_type,
+                             const PreFenceVisitor& pre_fence_visitor) {
+  constexpr size_t header_size = sizeof(String);
+  const bool compressible = kUseStringCompression && String::IsCompressed(utf16_length_with_flag);
+  const size_t block_size = (compressible) ? sizeof(uint8_t) : sizeof(uint16_t);
+  size_t length = String::GetLengthFromCount(utf16_length_with_flag);
+  static_assert(sizeof(length) <= sizeof(size_t),
+                "static_cast<size_t>(utf16_length) must not lose bits.");
+  size_t data_size = block_size * length;
+  size_t size = header_size + data_size;
+  // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
+  // so make sure the allocator clears the padding as well.
+  // http://b/23528461
+  size_t alloc_size = RoundUp(size, kObjectAlignment);
+
+  Runtime* runtime = Runtime::Current();
+  ObjPtr<Class> string_class = GetClassRoot<String>(runtime->GetClassLinker());
+  // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
+  // Do this by comparing with the maximum length that will _not_ cause an overflow.
+  const size_t overflow_length = (-header_size) / block_size;   // Unsigned arithmetic.
+  const size_t max_alloc_length = overflow_length - 1u;
+  static_assert(IsAligned<sizeof(uint16_t)>(kObjectAlignment),
+                "kObjectAlignment must be at least as big as Java char alignment");
+  const size_t max_length = RoundDown(max_alloc_length, kObjectAlignment / block_size);
+  if (UNLIKELY(length > max_length)) {
+    self->ThrowOutOfMemoryError(
+        android::base::StringPrintf("%s of length %d would overflow",
+                                    Class::PrettyDescriptor(string_class).c_str(),
+                                    static_cast<int>(length)).c_str());
+    return nullptr;
+  }
+
+  gc::Heap* heap = runtime->GetHeap();
+  return down_cast<String*>(
+      heap->AllocObjectWithAllocator<kIsInstrumented, true>(self,
+                                                            string_class,
+                                                            alloc_size,
+                                                            allocator_type,
+                                                            pre_fence_visitor));
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
+  const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
+  SetStringCountVisitor visitor(length_with_flag);
+  return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromByteArray(Thread* self,
+                                          int32_t byte_length,
+                                          Handle<ByteArray> array,
+                                          int32_t offset,
+                                          int32_t high_byte,
+                                          gc::AllocatorType allocator_type) {
+  const uint8_t* const src = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
+  high_byte &= 0xff;  // Extract the relevant bits before determining `compressible`.
+  const bool compressible =
+      kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length) && (high_byte == 0);
+  const int32_t length_with_flag = String::GetFlaggedCount(byte_length, compressible);
+  SetStringCountAndBytesVisitor visitor(length_with_flag, array, offset, high_byte << 8);
+  String* string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+  return string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromCharArray(Thread* self,
+                                          int32_t count,
+                                          Handle<CharArray> array,
+                                          int32_t offset,
+                                          gc::AllocatorType allocator_type) {
+  // It is a caller error to have a count less than the actual array's size.
+  DCHECK_GE(array->GetLength(), count);
+  const bool compressible = kUseStringCompression &&
+                            String::AllASCII<uint16_t>(array->GetData() + offset, count);
+  const int32_t length_with_flag = String::GetFlaggedCount(count, compressible);
+  SetStringCountAndValueVisitorFromCharArray visitor(length_with_flag, array, offset);
+  String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+  return new_string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromString(Thread* self,
+                                       int32_t string_length,
+                                       Handle<String> string,
+                                       int32_t offset,
+                                       gc::AllocatorType allocator_type) {
+  const bool compressible = kUseStringCompression &&
+      ((string->IsCompressed()) ? true : String::AllASCII<uint16_t>(string->GetValue() + offset,
+                                                                    string_length));
+  const int32_t length_with_flag = String::GetFlaggedCount(string_length, compressible);
+  SetStringCountAndValueVisitorFromString visitor(length_with_flag, string, offset);
+  String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+  return new_string;
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 8fa2c6c..f04a8aa 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -20,17 +20,10 @@
 
 #include "android-base/stringprintf.h"
 
-#include "array.h"
-#include "base/bit_utils.h"
-#include "base/globals.h"
-#include "base/utils.h"
-#include "class.h"
-#include "class_root.h"
+#include "class-inl.h"
 #include "common_throws.h"
 #include "dex/utf.h"
-#include "gc/heap-inl.h"
-#include "runtime.h"
-#include "thread.h"
+#include "runtime_globals.h"
 
 namespace art {
 namespace mirror {
@@ -49,127 +42,6 @@
   return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
 }
 
-// Sets string count in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountVisitor {
- public:
-  explicit SetStringCountVisitor(int32_t count) : count_(count) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
-    string->SetCount(count_);
-    DCHECK(!string->IsCompressed() || kUseStringCompression);
-  }
-
- private:
-  const int32_t count_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndBytesVisitor {
- public:
-  SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
-                                int32_t high_byte)
-      : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
-    string->SetCount(count_);
-    DCHECK(!string->IsCompressed() || kUseStringCompression);
-    int32_t length = String::GetLengthFromCount(count_);
-    const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
-    if (string->IsCompressed()) {
-      uint8_t* valueCompressed = string->GetValueCompressed();
-      for (int i = 0; i < length; i++) {
-        valueCompressed[i] = (src[i] & 0xFF);
-      }
-    } else {
-      uint16_t* value = string->GetValue();
-      for (int i = 0; i < length; i++) {
-        value[i] = high_byte_ + (src[i] & 0xFF);
-      }
-    }
-  }
-
- private:
-  const int32_t count_;
-  Handle<ByteArray> src_array_;
-  const int32_t offset_;
-  const int32_t high_byte_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndValueVisitorFromCharArray {
- public:
-  SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
-                                             int32_t offset) :
-    count_(count), src_array_(src_array), offset_(offset) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
-    string->SetCount(count_);
-    const uint16_t* const src = src_array_->GetData() + offset_;
-    const int32_t length = String::GetLengthFromCount(count_);
-    if (kUseStringCompression && String::IsCompressed(count_)) {
-      for (int i = 0; i < length; ++i) {
-        string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
-      }
-    } else {
-      memcpy(string->GetValue(), src, length * sizeof(uint16_t));
-    }
-  }
-
- private:
-  const int32_t count_;
-  Handle<CharArray> src_array_;
-  const int32_t offset_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndValueVisitorFromString {
- public:
-  SetStringCountAndValueVisitorFromString(int32_t count,
-                                          Handle<String> src_string,
-                                          int32_t offset) :
-    count_(count), src_string_(src_string), offset_(offset) {
-  }
-
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
-    string->SetCount(count_);
-    const int32_t length = String::GetLengthFromCount(count_);
-    bool compressible = kUseStringCompression && String::IsCompressed(count_);
-    if (src_string_->IsCompressed()) {
-      const uint8_t* const src = src_string_->GetValueCompressed() + offset_;
-      memcpy(string->GetValueCompressed(), src, length * sizeof(uint8_t));
-    } else {
-      const uint16_t* const src = src_string_->GetValue() + offset_;
-      if (compressible) {
-        for (int i = 0; i < length; ++i) {
-          string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
-        }
-      } else {
-        memcpy(string->GetValue(), src, length * sizeof(uint16_t));
-      }
-    }
-  }
-
- private:
-  const int32_t count_;
-  Handle<String> src_string_;
-  const int32_t offset_;
-};
-
 inline uint16_t String::CharAt(int32_t index) {
   int32_t count = GetLength();
   if (UNLIKELY((index < 0) || (index >= count))) {
@@ -195,93 +67,6 @@
   return -1;
 }
 
-template <bool kIsInstrumented, typename PreFenceVisitor>
-inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag,
-                             gc::AllocatorType allocator_type,
-                             const PreFenceVisitor& pre_fence_visitor) {
-  constexpr size_t header_size = sizeof(String);
-  const bool compressible = kUseStringCompression && String::IsCompressed(utf16_length_with_flag);
-  const size_t block_size = (compressible) ? sizeof(uint8_t) : sizeof(uint16_t);
-  size_t length = String::GetLengthFromCount(utf16_length_with_flag);
-  static_assert(sizeof(length) <= sizeof(size_t),
-                "static_cast<size_t>(utf16_length) must not lose bits.");
-  size_t data_size = block_size * length;
-  size_t size = header_size + data_size;
-  // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
-  // so make sure the allocator clears the padding as well.
-  // http://b/23528461
-  size_t alloc_size = RoundUp(size, kObjectAlignment);
-
-  Runtime* runtime = Runtime::Current();
-  ObjPtr<Class> string_class = GetClassRoot<String>(runtime->GetClassLinker());
-  // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
-  // Do this by comparing with the maximum length that will _not_ cause an overflow.
-  const size_t overflow_length = (-header_size) / block_size;   // Unsigned arithmetic.
-  const size_t max_alloc_length = overflow_length - 1u;
-  static_assert(IsAligned<sizeof(uint16_t)>(kObjectAlignment),
-                "kObjectAlignment must be at least as big as Java char alignment");
-  const size_t max_length = RoundDown(max_alloc_length, kObjectAlignment / block_size);
-  if (UNLIKELY(length > max_length)) {
-    self->ThrowOutOfMemoryError(
-        android::base::StringPrintf("%s of length %d would overflow",
-                                    Class::PrettyDescriptor(string_class).c_str(),
-                                    static_cast<int>(length)).c_str());
-    return nullptr;
-  }
-
-  gc::Heap* heap = runtime->GetHeap();
-  return down_cast<String*>(
-      heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, alloc_size,
-                                                            allocator_type, pre_fence_visitor));
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
-  const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true);
-  SetStringCountVisitor visitor(length_with_flag);
-  return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromByteArray(Thread* self, int32_t byte_length,
-                                          Handle<ByteArray> array, int32_t offset,
-                                          int32_t high_byte, gc::AllocatorType allocator_type) {
-  const uint8_t* const src = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
-  high_byte &= 0xff;  // Extract the relevant bits before determining `compressible`.
-  const bool compressible =
-      kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length) && (high_byte == 0);
-  const int32_t length_with_flag = String::GetFlaggedCount(byte_length, compressible);
-  SetStringCountAndBytesVisitor visitor(length_with_flag, array, offset, high_byte << 8);
-  String* string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
-  return string;
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromCharArray(Thread* self, int32_t count,
-                                          Handle<CharArray> array, int32_t offset,
-                                          gc::AllocatorType allocator_type) {
-  // It is a caller error to have a count less than the actual array's size.
-  DCHECK_GE(array->GetLength(), count);
-  const bool compressible = kUseStringCompression &&
-                            String::AllASCII<uint16_t>(array->GetData() + offset, count);
-  const int32_t length_with_flag = String::GetFlaggedCount(count, compressible);
-  SetStringCountAndValueVisitorFromCharArray visitor(length_with_flag, array, offset);
-  String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
-  return new_string;
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromString(Thread* self, int32_t string_length, Handle<String> string,
-                                       int32_t offset, gc::AllocatorType allocator_type) {
-  const bool compressible = kUseStringCompression &&
-      ((string->IsCompressed()) ? true : String::AllASCII<uint16_t>(string->GetValue() + offset,
-                                                                    string_length));
-  const int32_t length_with_flag = String::GetFlaggedCount(string_length, compressible);
-  SetStringCountAndValueVisitorFromString visitor(length_with_flag, string, offset);
-  String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
-  return new_string;
-}
-
 inline int32_t String::GetHashCode() {
   int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
   if (UNLIKELY(result == 0)) {
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index d5ef039..bf99c37 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-#include "string-inl.h"
+#include "string-alloc-inl.h"
 
 #include "arch/memcmp16.h"
-#include "array.h"
+#include "array-alloc-inl.h"
 #include "base/array_ref.h"
 #include "base/stl_util.h"
 #include "class-inl.h"
@@ -234,19 +234,6 @@
   }
 }
 
-bool String::Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) {
-  if (this->GetLength() != that_length) {
-    return false;
-  } else {
-    for (int32_t i = 0; i < that_length; ++i) {
-      if (this->CharAt(i) != that_chars[that_offset + i]) {
-        return false;
-      }
-    }
-    return true;
-  }
-}
-
 bool String::Equals(const char* modified_utf8) {
   const int32_t length = GetLength();
   int32_t i = 0;
@@ -274,30 +261,6 @@
   return *modified_utf8 == '\0';
 }
 
-bool String::Equals(const StringPiece& modified_utf8) {
-  const int32_t length = GetLength();
-  const char* p = modified_utf8.data();
-  for (int32_t i = 0; i < length; ++i) {
-    uint32_t ch = GetUtf16FromUtf8(&p);
-
-    if (GetLeadingUtf16Char(ch) != CharAt(i)) {
-      return false;
-    }
-
-    const uint16_t trailing = GetTrailingUtf16Char(ch);
-    if (trailing != 0) {
-      if (i == (length - 1)) {
-        return false;
-      }
-
-      if (CharAt(++i) != trailing) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 // Create a modified UTF-8 encoded std::string from a java/lang/String object.
 std::string String::ToModifiedUtf8() {
   size_t byte_count = GetUtfLength();
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index d08717c..4a7f4ae 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -18,16 +18,15 @@
 #define ART_RUNTIME_MIRROR_STRING_H_
 
 #include "base/bit_utils.h"
-#include "base/globals.h"
 #include "gc/allocator_type.h"
 #include "class.h"
 #include "object.h"
+#include "runtime_globals.h"
 
 namespace art {
 
 template<class T> class Handle;
 struct StringOffsets;
-class StringPiece;
 class StubTest_ReadBarrierForRoot_Test;
 
 namespace mirror {
@@ -50,11 +49,11 @@
     return sizeof(String);
   }
 
-  static MemberOffset CountOffset() {
+  static constexpr MemberOffset CountOffset() {
     return OFFSET_OF_OBJECT_MEMBER(String, count_);
   }
 
-  static MemberOffset ValueOffset() {
+  static constexpr MemberOffset ValueOffset() {
     return OFFSET_OF_OBJECT_MEMBER(String, value_);
   }
 
@@ -154,27 +153,10 @@
   static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  // TODO: This is only used in the interpreter to compare against
-  // entries from a dex files constant pool (ArtField names). Should
-  // we unify this with Equals(const StringPiece&); ?
   bool Equals(const char* modified_utf8) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // TODO: This is only used to compare DexCache.location with
-  // a dex_file's location (which is an std::string). Do we really
-  // need this in mirror::String just for that one usage ?
-  bool Equals(const StringPiece& modified_utf8)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   bool Equals(ObjPtr<String> that) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Compare UTF-16 code point values not in a locale-sensitive manner
-  int Compare(int32_t utf16_length, const char* utf8_data_in);
-
-  // TODO: do we need this overload? give it a more intention-revealing name.
-  bool Equals(const uint16_t* that_chars, int32_t that_offset,
-              int32_t that_length)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Create a modified UTF-8 encoded std::string from a java/lang/String object.
   std::string ToModifiedUtf8() REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 864e1ea..7c25529 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -18,6 +18,7 @@
 
 #include "array-inl.h"
 #include "art_field-inl.h"
+#include "base/casts.h"
 #include "class-inl.h"
 #include "class_linker.h"
 #include "class_root.h"
@@ -26,6 +27,7 @@
 #include "jvalue-inl.h"
 #include "method_handles-inl.h"
 #include "method_type.h"
+#include "object_array-alloc-inl.h"
 #include "obj_ptr-inl.h"
 #include "well_known_classes.h"
 
@@ -691,7 +693,7 @@
 template <typename T>
 class FieldAccessViaAccessor {
  public:
-  typedef Object::Accessor<T> Accessor;
+  using Accessor = Object::Accessor<T>;
 
   // Apply an Accessor to get a field in an object.
   static void Get(ObjPtr<Object> obj,
@@ -1033,7 +1035,7 @@
                                                                CASMode::kStrong,
                                                                std::memory_order_seq_cst);
       }
-      StoreResult(cas_result, result);
+      StoreResult(static_cast<uint8_t>(cas_result), result);
       break;
     }
     case VarHandle::AccessMode::kWeakCompareAndSet:
@@ -1058,7 +1060,7 @@
             CASMode::kWeak,
             std::memory_order_seq_cst);
       }
-      StoreResult(cas_result, result);
+      StoreResult(static_cast<uint8_t>(cas_result), result);
       break;
     }
     case VarHandle::AccessMode::kCompareAndExchange:
@@ -1680,8 +1682,7 @@
 }
 
 ArtField* FieldVarHandle::GetField() {
-  uintptr_t opaque_field = static_cast<uintptr_t>(GetField64(ArtFieldOffset()));
-  return reinterpret_cast<ArtField*>(opaque_field);
+  return reinterpret_cast64<ArtField*>(GetField64(ArtFieldOffset()));
 }
 
 bool FieldVarHandle::Access(AccessMode access_mode,
@@ -1946,7 +1947,7 @@
 
   // Determine offset and limit for accesses.
   int32_t byte_buffer_offset;
-  if (native_address == 0l) {
+  if (native_address == 0L) {
     // Accessing a heap allocated byte buffer.
     byte_buffer_offset = byte_buffer->GetField32(
         GetMemberOffset(WellKnownClasses::java_nio_ByteBuffer_offset));
diff --git a/runtime/mirror/var_handle_test.cc b/runtime/mirror/var_handle_test.cc
index 9df96dd..a349e34 100644
--- a/runtime/mirror/var_handle_test.cc
+++ b/runtime/mirror/var_handle_test.cc
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include "art_field-inl.h"
+#include "class-alloc-inl.h"
 #include "class-inl.h"
 #include "class_linker-inl.h"
 #include "class_loader.h"
@@ -28,6 +29,7 @@
 #include "handle_scope-inl.h"
 #include "jvalue-inl.h"
 #include "method_type.h"
+#include "object_array-alloc-inl.h"
 #include "object_array-inl.h"
 #include "reflection.h"
 #include "scoped_thread_state_change-inl.h"
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 02aa1a8..6abc8d7 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -97,6 +97,7 @@
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
       wait_set_(nullptr),
+      wake_set_(nullptr),
       hash_code_(hash_code),
       locking_method_(nullptr),
       locking_dex_pc_(0),
@@ -120,6 +121,7 @@
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
       wait_set_(nullptr),
+      wake_set_(nullptr),
       hash_code_(hash_code),
       locking_method_(nullptr),
       locking_dex_pc_(0),
@@ -166,11 +168,11 @@
     }
     case LockWord::kUnlocked: {
       LOG(FATAL) << "Inflating unlocked lock word";
-      break;
+      UNREACHABLE();
     }
     default: {
       LOG(FATAL) << "Invalid monitor state " << lw.GetState();
-      return false;
+      UNREACHABLE();
     }
   }
   LockWord fat(this, lw.GCState());
@@ -226,7 +228,8 @@
 }
 
 void Monitor::AppendToWaitSet(Thread* thread) {
-  DCHECK(owner_ == Thread::Current());
+  // Not checking that the owner is equal to this thread, since we've released
+  // the monitor by the time this method is called.
   DCHECK(thread != nullptr);
   DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
   if (wait_set_ == nullptr) {
@@ -245,71 +248,39 @@
 void Monitor::RemoveFromWaitSet(Thread *thread) {
   DCHECK(owner_ == Thread::Current());
   DCHECK(thread != nullptr);
-  if (wait_set_ == nullptr) {
-    return;
-  }
-  if (wait_set_ == thread) {
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-    return;
-  }
-
-  Thread* t = wait_set_;
-  while (t->GetWaitNext() != nullptr) {
-    if (t->GetWaitNext() == thread) {
-      t->SetWaitNext(thread->GetWaitNext());
-      thread->SetWaitNext(nullptr);
-      return;
+  auto remove = [&](Thread*& set){
+    if (set != nullptr) {
+      if (set == thread) {
+        set = thread->GetWaitNext();
+        thread->SetWaitNext(nullptr);
+        return true;
+      }
+      Thread* t = set;
+      while (t->GetWaitNext() != nullptr) {
+        if (t->GetWaitNext() == thread) {
+          t->SetWaitNext(thread->GetWaitNext());
+          thread->SetWaitNext(nullptr);
+          return true;
+        }
+        t = t->GetWaitNext();
+      }
     }
-    t = t->GetWaitNext();
+    return false;
+  };
+  if (remove(wait_set_)) {
+    return;
   }
+  remove(wake_set_);
 }
 
 void Monitor::SetObject(mirror::Object* object) {
   obj_ = GcRoot<mirror::Object>(object);
 }
 
-// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-
-struct NthCallerWithDexPcVisitor final : public StackVisitor {
-  explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        method_(nullptr),
-        dex_pc_(0),
-        current_frame_number_(0),
-        wanted_frame_number_(frame) {}
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    if (m == nullptr || m->IsRuntimeMethod()) {
-      // Runtime method, upcall, or resolution issue. Skip.
-      return true;
-    }
-
-    // Is this the requested frame?
-    if (current_frame_number_ == wanted_frame_number_) {
-      method_ = m;
-      dex_pc_ = GetDexPc(false /* abort_on_error*/);
-      return false;
-    }
-
-    // Look for more.
-    current_frame_number_++;
-    return true;
-  }
-
-  ArtMethod* method_;
-  uint32_t dex_pc_;
-
- private:
-  size_t current_frame_number_;
-  const size_t wanted_frame_number_;
-};
-
 // This function is inlined and just helps to not have the VLOG and ATRACE check at all the
 // potential tracing points.
 void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
-  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) {
+  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATraceEnabled())) {
     AtraceMonitorLockImpl(self, obj, is_wait);
   }
 }
@@ -318,13 +289,41 @@
   // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
   // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
   // stack walk than if !is_wait.
-  NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
-  visitor.WalkStack(false);
+  const size_t wanted_frame_number = is_wait ? 1U : 0U;
+
+  ArtMethod* method = nullptr;
+  uint32_t dex_pc = 0u;
+
+  size_t current_frame_number = 0u;
+  StackVisitor::WalkStack(
+      // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        if (m == nullptr || m->IsRuntimeMethod()) {
+          // Runtime method, upcall, or resolution issue. Skip.
+          return true;
+        }
+
+        // Is this the requested frame?
+        if (current_frame_number == wanted_frame_number) {
+          method = m;
+          dex_pc = stack_visitor->GetDexPc(/* abort_on_error= */ false);
+          return false;
+        }
+
+        // Look for more.
+        current_frame_number++;
+        return true;
+      },
+      self,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
   const char* prefix = is_wait ? "Waiting on " : "Locking ";
 
   const char* filename;
   int32_t line_number;
-  TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);
+  TranslateLocation(method, dex_pc, &filename, &line_number);
 
   // It would be nice to have a stable "ID" for the object here. However, the only stable thing
   // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
@@ -339,12 +338,12 @@
       (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj))),
       (filename != nullptr ? filename : "null"),
       line_number);
-  ATRACE_BEGIN(tmp.c_str());
+  ATraceBegin(tmp.c_str());
 }
 
 void Monitor::AtraceMonitorUnlock() {
   if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
-    ATRACE_END();
+    ATraceEnd();
   }
 }
 
@@ -385,7 +384,7 @@
   } else {
     return false;
   }
-  AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+  AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
   return true;
 }
 
@@ -432,7 +431,7 @@
     // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
     // lock and then re-acquiring the mutator lock can deadlock.
     bool started_trace = false;
-    if (ATRACE_ENABLED()) {
+    if (ATraceEnabled()) {
       if (owner_ != nullptr) {  // Did the owner_ give the lock up?
         std::ostringstream oss;
         std::string name;
@@ -451,7 +450,7 @@
         oss << " blocking from "
             << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
             << ":" << line_number << ")";
-        ATRACE_BEGIN(oss.str().c_str());
+        ATraceBegin(oss.str().c_str());
         started_trace = true;
       }
     }
@@ -582,7 +581,7 @@
       }
     }
     if (started_trace) {
-      ATRACE_END();
+      ATraceEnd();
     }
     self->SetMonitorEnterObject(nullptr);
     monitor_lock_.Lock(self);  // Reacquire locks in order.
@@ -699,33 +698,81 @@
 bool Monitor::Unlock(Thread* self) {
   DCHECK(self != nullptr);
   uint32_t owner_thread_id = 0u;
-  {
-    MutexLock mu(self, monitor_lock_);
-    Thread* owner = owner_;
-    if (owner != nullptr) {
-      owner_thread_id = owner->GetThreadId();
-    }
-    if (owner == self) {
-      // We own the monitor, so nobody else can be in here.
-      AtraceMonitorUnlock();
-      if (lock_count_ == 0) {
-        owner_ = nullptr;
-        locking_method_ = nullptr;
-        locking_dex_pc_ = 0;
-        // Wake a contender.
-        monitor_contenders_.Signal(self);
-      } else {
-        --lock_count_;
-      }
+  DCHECK(!monitor_lock_.IsExclusiveHeld(self));
+  monitor_lock_.Lock(self);
+  Thread* owner = owner_;
+  if (owner != nullptr) {
+    owner_thread_id = owner->GetThreadId();
+  }
+  if (owner == self) {
+    // We own the monitor, so nobody else can be in here.
+    AtraceMonitorUnlock();
+    if (lock_count_ == 0) {
+      owner_ = nullptr;
+      locking_method_ = nullptr;
+      locking_dex_pc_ = 0;
+      SignalContendersAndReleaseMonitorLock(self);
+      return true;
+    } else {
+      --lock_count_;
+      monitor_lock_.Unlock(self);
       return true;
     }
   }
   // We don't own this, so we're not allowed to unlock it.
   // The JNI spec says that we should throw IllegalMonitorStateException in this case.
   FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
+  monitor_lock_.Unlock(self);
   return false;
 }
 
+void Monitor::SignalContendersAndReleaseMonitorLock(Thread* self) {
+  // We want to signal one thread to wake up, to acquire the monitor that
+  // we are releasing. This could either be a Thread waiting on its own
+  // ConditionVariable, or a thread waiting on monitor_contenders_.
+  while (wake_set_ != nullptr) {
+    // No risk of waking ourselves here; since monitor_lock_ is not released until we're ready to
+    // return, notify can't move the current thread from wait_set_ to wake_set_ until this
+    // method is done checking wake_set_.
+    Thread* thread = wake_set_;
+    wake_set_ = thread->GetWaitNext();
+    thread->SetWaitNext(nullptr);
+
+    // Check to see if the thread is still waiting.
+    {
+      // In the case of wait(), we'll be acquiring another thread's GetWaitMutex with
+      // self's GetWaitMutex held. This does not risk deadlock, because we only acquire this lock
+      // for threads in the wake_set_. A thread can only enter wake_set_ from Notify or NotifyAll,
+      // and those hold monitor_lock_. Thus, the threads whose wait mutexes we acquire here must
+      // have already been released from wait(), since we have not released monitor_lock_ until
+      // after we've chosen our thread to wake, so there is no risk of the following lock ordering
+      // leading to deadlock:
+      // Thread 1 waits
+      // Thread 2 waits
+      // Thread 3 moves threads 1 and 2 from wait_set_ to wake_set_
+      // Thread 1 enters this block, and attempts to acquire Thread 2's GetWaitMutex to wake it
+      // Thread 2 enters this block, and attempts to acquire Thread 1's GetWaitMutex to wake it
+      //
+      // Since monitor_lock_ is not released until the thread-to-be-woken-up's GetWaitMutex is
+      // acquired, two threads cannot attempt to acquire each other's GetWaitMutex while holding
+      // their own and cause deadlock.
+      MutexLock wait_mu(self, *thread->GetWaitMutex());
+      if (thread->GetWaitMonitor() != nullptr) {
+        // Release the lock, so that a potentially awakened thread will not
+        // immediately contend on it. The lock ordering here is:
+        // monitor_lock_, self->GetWaitMutex, thread->GetWaitMutex
+        monitor_lock_.Unlock(self);
+        thread->GetWaitConditionVariable()->Signal(self);
+        return;
+      }
+    }
+  }
+  // If we didn't wake any threads that were originally waiting on us,
+  // wake a contender.
+  monitor_contenders_.Signal(self);
+  monitor_lock_.Unlock(self);
+}
+
 void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why) {
   DCHECK(self != nullptr);
@@ -755,17 +802,9 @@
   }
 
   /*
-   * Add ourselves to the set of threads waiting on this monitor, and
-   * release our hold.  We need to let it go even if we're a few levels
+   * Release our hold - we need to let it go even if we're a few levels
    * deep in a recursive lock, and we need to restore that later.
-   *
-   * We append to the wait set ahead of clearing the count and owner
-   * fields so the subroutine can check that the calling thread owns
-   * the monitor.  Aside from that, the order of member updates is
-   * not order sensitive as we hold the pthread mutex.
    */
-  AppendToWaitSet(self);
-  ++num_waiters_;
   int prev_lock_count = lock_count_;
   lock_count_ = 0;
   owner_ = nullptr;
@@ -777,7 +816,7 @@
   AtraceMonitorUnlock();  // For the implict Unlock() just above. This will only end the deepest
                           // nesting, but that is enough for the visualization, and corresponds to
                           // the single Lock() we do afterwards.
-  AtraceMonitorLock(self, GetObject(), true /* is_wait */);
+  AtraceMonitorLock(self, GetObject(), /* is_wait= */ true);
 
   bool was_interrupted = false;
   bool timed_out = false;
@@ -790,6 +829,17 @@
     // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
     MutexLock mu(self, *self->GetWaitMutex());
 
+    /*
+     * Add ourselves to the set of threads waiting on this monitor.
+     * It's important that we are only added to the wait set after
+     * acquiring our GetWaitMutex, so that calls to Notify() that occur after we
+     * have released monitor_lock_ will not move us from wait_set_ to wake_set_
+     * until we've signalled contenders on this monitor.
+     */
+    AppendToWaitSet(self);
+    ++num_waiters_;
+
+
     // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
     // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
     // up.
@@ -797,8 +847,7 @@
     self->SetWaitMonitor(this);
 
     // Release the monitor lock.
-    monitor_contenders_.Signal(self);
-    monitor_lock_.Unlock(self);
+    SignalContendersAndReleaseMonitorLock(self);
 
     // Handle the case where the thread was interrupted before we called wait().
     if (self->IsInterrupted()) {
@@ -874,18 +923,12 @@
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
     return;
   }
-  // Signal the first waiting thread in the wait set.
-  while (wait_set_ != nullptr) {
-    Thread* thread = wait_set_;
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-
-    // Check to see if the thread is still waiting.
-    MutexLock wait_mu(self, *thread->GetWaitMutex());
-    if (thread->GetWaitMonitor() != nullptr) {
-      thread->GetWaitConditionVariable()->Signal(self);
-      return;
-    }
+  // Move one thread from the wait set to the wake set.
+  Thread* to_move = wait_set_;
+  if (to_move != nullptr) {
+    wait_set_ = to_move->GetWaitNext();
+    to_move->SetWaitNext(wake_set_);
+    wake_set_ = to_move;
   }
 }
 
@@ -897,12 +940,20 @@
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
     return;
   }
-  // Signal all threads in the wait set.
-  while (wait_set_ != nullptr) {
-    Thread* thread = wait_set_;
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-    thread->Notify();
+
+  // Move all threads from the wait set to the wake set.
+  Thread* to_move = wait_set_;
+  if (to_move != nullptr) {
+    wait_set_ = nullptr;
+    Thread* move_to = wake_set_;
+    if (move_to == nullptr) {
+      wake_set_ = to_move;
+      return;
+    }
+    while (move_to->GetWaitNext() != nullptr) {
+      move_to = move_to->GetWaitNext();
+    }
+    move_to->SetWaitNext(to_move);
   }
 }
 
@@ -1042,7 +1093,7 @@
         // No ordering required for preceding lockword read, since we retest.
         LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
         if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
-          AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+          AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
           return h_obj.Get();  // Success!
         }
         continue;  // Go again.
@@ -1060,8 +1111,8 @@
             // Only this thread pays attention to the count. Thus there is no need for stronger
             // than relaxed memory ordering.
             if (!kUseReadBarrier) {
-              h_obj->SetLockWord(thin_locked, false /* volatile */);
-              AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+              h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
+              AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
               return h_obj.Get();  // Success!
             } else {
               // Use CAS to preserve the read barrier state.
@@ -1069,7 +1120,7 @@
                                      thin_locked,
                                      CASMode::kWeak,
                                      std::memory_order_relaxed)) {
-                AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+                AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
                 return h_obj.Get();  // Success!
               }
             }
@@ -1185,7 +1236,7 @@
       }
       default: {
         LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
-        return false;
+        UNREACHABLE();
       }
     }
   }
@@ -1229,7 +1280,7 @@
       case LockWord::kFatLocked:  // Unreachable given the loop condition above. Fall-through.
       default: {
         LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
-        return;
+        UNREACHABLE();
       }
     }
   }
@@ -1269,7 +1320,7 @@
     }
     default: {
       LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
-      return;
+      UNREACHABLE();
     }
   }
 }
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 6b7604e..c1f84e9 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -181,6 +181,8 @@
   // this routine.
   void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
 
+  void SignalContendersAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
+
   // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
   // calling thread must own the lock or the owner must be suspended. There's a race with other
   // threads inflating the lock, installing hash codes and spurious failures. The caller should
@@ -306,6 +308,9 @@
   // Threads currently waiting on this monitor.
   Thread* wait_set_ GUARDED_BY(monitor_lock_);
 
+  // Threads that were waiting on this monitor, but are now contending on it.
+  Thread* wake_set_ GUARDED_BY(monitor_lock_);
+
   // Stored object hash code, generated lazily by GetHashCode.
   AtomicInteger hash_code_;
 
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 74623da..19e1f3d 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -43,7 +43,7 @@
 
   // Emit the process name, <= 37 bytes.
   {
-    int fd = open("/proc/self/cmdline", O_RDONLY);
+    int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
     char procName[33];
     memset(procName, 0, sizeof(procName));
     read(fd, procName, sizeof(procName) - 1);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index c943402..3968239 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -20,7 +20,7 @@
 #include <android-base/logging.h>
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "monitor.h"
 #include "stack.h"
 #include "thread.h"
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index c88748f..8ddd50f 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -16,6 +16,7 @@
 
 #include "monitor.h"
 
+#include <memory>
 #include <string>
 
 #include "base/atomic.h"
@@ -62,7 +63,7 @@
       monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
       expected_(expected) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     {
       ScopedObjectAccess soa(self);
 
@@ -118,7 +119,7 @@
     }
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -136,7 +137,7 @@
       monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
       expected_(expected) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     monitor_test_->barrier_->Wait(self);  // Wait for the other thread to set up the monitor.
 
     {
@@ -158,7 +159,7 @@
     monitor_test_->complete_barrier_->Wait(self);  // Wait for test completion.
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -174,7 +175,7 @@
   InterruptTask(MonitorTest* monitor_test, uint64_t initial_sleep, uint64_t millis) :
       monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     monitor_test_->barrier_->Wait(self);  // Wait for the other thread to set up the monitor.
 
     {
@@ -202,7 +203,7 @@
     monitor_test_->complete_barrier_->Wait(self);  // Wait for test completion.
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -216,7 +217,7 @@
  public:
   explicit WatchdogTask(MonitorTest* monitor_test) : monitor_test_(monitor_test) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
 
     monitor_test_->watchdog_object_.Get()->MonitorEnter(self);        // Lock the object.
@@ -231,7 +232,7 @@
     }
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -251,8 +252,8 @@
                                                                               "hello, world!"));
 
   // Create the barrier used to synchronize.
-  test->barrier_ = std::unique_ptr<Barrier>(new Barrier(2));
-  test->complete_barrier_ = std::unique_ptr<Barrier>(new Barrier(3));
+  test->barrier_ = std::make_unique<Barrier>(2);
+  test->complete_barrier_ = std::make_unique<Barrier>(3);
   test->completed_ = false;
 
   // Our job: Fill the heap, then try Wait.
@@ -326,14 +327,14 @@
  public:
   explicit TryLockTask(Handle<mirror::Object> obj) : obj_(obj) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     // Lock is held by other thread, try lock should fail.
     ObjectTryLock<mirror::Object> lock(self, obj_);
     EXPECT_FALSE(lock.Acquired());
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -361,7 +362,7 @@
     thread_pool.AddTask(self, new TryLockTask(obj1));
     thread_pool.StartWorkers(self);
     ScopedThreadSuspension sts(self, kSuspended);
-    thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+    thread_pool.Wait(Thread::Current(), /*do_work=*/false, /*may_hold_locks=*/false);
   }
   // Test that the trylock actually locks the object.
   {
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 71fabd0..4f9cf70 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -20,6 +20,7 @@
 
 #include "android-base/stringprintf.h"
 
+#include "base/casts.h"
 #include "base/file_utils.h"
 #include "base/logging.h"
 #include "base/os.h"
@@ -34,6 +35,7 @@
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_loader.h"
+#include "handle_scope-inl.h"
 #include "jit/debugger_interface.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
@@ -74,10 +76,10 @@
     return false;
   }
 
-  oat_file = reinterpret_cast<const OatFile*>(static_cast<uintptr_t>(long_data[kOatFileIndex]));
+  oat_file = reinterpret_cast64<const OatFile*>(long_data[kOatFileIndex]);
   dex_files.reserve(array_size - 1);
   for (jsize i = kDexFileIndexStart; i < array_size; ++i) {
-    dex_files.push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(long_data[i])));
+    dex_files.push_back(reinterpret_cast64<const DexFile*>(long_data[i]));
   }
 
   env->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array), long_data, JNI_ABORT);
@@ -99,9 +101,9 @@
     return nullptr;
   }
 
-  long_data[kOatFileIndex] = reinterpret_cast<uintptr_t>(oat_file);
+  long_data[kOatFileIndex] = reinterpret_cast64<jlong>(oat_file);
   for (size_t i = 0; i < vec.size(); ++i) {
-    long_data[kDexFileIndexStart + i] = reinterpret_cast<uintptr_t>(vec[i].get());
+    long_data[kDexFileIndexStart + i] = reinterpret_cast64<jlong>(vec[i].get());
   }
 
   env->ReleaseLongArrayElements(long_array, long_data, 0);
@@ -111,7 +113,7 @@
 
   // Now release all the unique_ptrs.
   for (auto& dex_file : vec) {
-    dex_file.release();
+    dex_file.release();  // NOLINT
   }
 
   return long_array;
@@ -173,10 +175,9 @@
   std::string error_message;
   size_t length = static_cast<size_t>(end - start);
   MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
-                                            /* addr */ nullptr,
                                             length,
                                             PROT_READ | PROT_WRITE,
-                                            /* low_4gb */ false,
+                                            /*low_4gb=*/ false,
                                             &error_message);
   if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
@@ -195,8 +196,8 @@
   std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
                                                                0,
                                                                std::move(dex_mem_map),
-                                                               /* verify */ true,
-                                                               /* verify_location */ true,
+                                                               /* verify= */ true,
+                                                               /* verify_checksum= */ true,
                                                                &error_message));
   if (dex_file == nullptr) {
     ScopedObjectAccess soa(env);
@@ -243,7 +244,7 @@
   }
 
   size_t length = static_cast<size_t>(end - start);
-  memcpy(dex_mem_map.Begin(), base_address, length);
+  memcpy(dex_mem_map.Begin(), base_address + start, length);
   return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
 }
 
@@ -294,7 +295,7 @@
       ScopedObjectAccess soa(env);
       for (auto& dex_file : dex_files) {
         if (linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
-          dex_file.release();
+          dex_file.release();  // NOLINT
         }
       }
     }
@@ -323,6 +324,9 @@
   }
   Runtime* const runtime = Runtime::Current();
   bool all_deleted = true;
+  // We need to clear the caches since they may contain pointers to the dex instructions.
+  // Different dex file can be loaded at the same memory location later by chance.
+  Thread::ClearAllInterpreterCaches();
   {
     ScopedObjectAccess soa(env);
     ObjPtr<mirror::Object> dex_files_object = soa.Decode<mirror::Object>(cookie);
@@ -333,8 +337,7 @@
     int32_t i = kDexFileIndexStart;  // Oat file is at index 0.
     for (const DexFile* dex_file : dex_files) {
       if (dex_file != nullptr) {
-        RemoveNativeDebugInfoForDex(soa.Self(), ArrayRef<const uint8_t>(dex_file->Begin(),
-                                                                        dex_file->Size()));
+        RemoveNativeDebugInfoForDex(soa.Self(), dex_file);
         // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
         // are calls to DexFile.close while the ART DexFile is still in use.
         if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
@@ -380,7 +383,7 @@
   const std::string descriptor(DotToDescriptor(class_name.c_str()));
   const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
   for (auto& dex_file : dex_files) {
-    const DexFile::ClassDef* dex_class_def =
+    const dex::ClassDef* dex_class_def =
         OatDexFile::FindClassDef(*dex_file, descriptor.c_str(), hash);
     if (dex_class_def != nullptr) {
       ScopedObjectAccess soa(env);
@@ -437,7 +440,7 @@
   std::set<const char*, CharPointerComparator> descriptors;
   for (auto& dex_file : dex_files) {
     for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
-      const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+      const dex::ClassDef& class_def = dex_file->GetClassDef(i);
       const char* descriptor = dex_file->GetClassDescriptor(class_def);
       descriptors.insert(descriptor);
     }
@@ -547,7 +550,7 @@
   }
 
   OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set,
-                                      false /* load_executable */);
+                                      /* load_executable= */ false);
   return env->NewStringUTF(oat_file_assistant.GetStatusDump().c_str());
 }
 
@@ -770,7 +773,7 @@
 
   OatFileAssistant oat_file_assistant(filename.c_str(),
                                       target_instruction_set,
-                                      false /* load_executable */);
+                                      /* load_executable= */ false);
 
   std::unique_ptr<OatFile> best_oat_file = oat_file_assistant.GetBestOatFile();
   if (best_oat_file == nullptr) {
@@ -833,8 +836,9 @@
     return;
   }
 
+  // Assign core platform domain as the dex files are allowed to access all the other domains.
   for (const DexFile* dex_file : dex_files) {
-    const_cast<DexFile*>(dex_file)->SetIsPlatformDexFile();
+    const_cast<DexFile*>(dex_file)->SetHiddenapiDomain(hiddenapi::Domain::kCorePlatform);
   }
 }
 
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6f98a6d..1531bac 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -23,6 +23,7 @@
 
 #include "nativehelper/jni_macros.h"
 
+#include "base/file_utils.h"
 #include "base/histogram-inl.h"
 #include "base/time_utils.h"
 #include "class_linker.h"
@@ -37,6 +38,8 @@
 #include "hprof/hprof.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/array-inl.h"
 #include "mirror/class.h"
 #include "mirror/object_array-inl.h"
 #include "native_util.h"
@@ -113,7 +116,7 @@
     return;
   }
 
-  int fd = dup(originalFd);
+  int fd = DupCloexec(originalFd);
   if (fd < 0) {
     ScopedObjectAccess soa(env);
     soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
@@ -366,7 +369,7 @@
 
     VariableSizedHandleScope hs2(soa.Self());
     std::vector<Handle<mirror::Object>> raw_instances;
-    heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+    heap->GetInstances(hs2, h_class, includeAssignable, /* max_count= */ 0, raw_instances);
     jobjectArray array = env->NewObjectArray(raw_instances.size(),
                                              WellKnownClasses::java_lang_Object,
                                              nullptr);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e61940..d705d5f 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -24,11 +24,13 @@
 #include <limits.h>
 #include "nativehelper/scoped_utf_chars.h"
 
-#include "android-base/stringprintf.h"
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 
 #include "arch/instruction_set.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/sdk_version.h"
 #include "class_linker-inl.h"
 #include "common_throws.h"
 #include "debugger.h"
@@ -44,6 +46,7 @@
 #include "intern_table.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "mirror/array-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
@@ -75,10 +78,6 @@
 static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) {
 }
 
-static jboolean VMRuntime_hasUsedHiddenApi(JNIEnv*, jobject) {
-  return Runtime::Current()->HasPendingHiddenApiWarning() ? JNI_TRUE : JNI_FALSE;
-}
-
 static void VMRuntime_setHiddenApiExemptions(JNIEnv* env,
                                             jclass,
                                             jobjectArray exemptions) {
@@ -224,7 +223,8 @@
 }
 
 static jstring VMRuntime_bootClassPath(JNIEnv* env, jobject) {
-  return env->NewStringUTF(DefaultToDot(Runtime::Current()->GetBootClassPathString()));
+  std::string boot_class_path = android::base::Join(Runtime::Current()->GetBootClassPath(), ':');
+  return env->NewStringUTF(DefaultToDot(boot_class_path));
 }
 
 static jstring VMRuntime_classPath(JNIEnv* env, jobject) {
@@ -259,16 +259,19 @@
   // where workarounds can be enabled.
   // Note that targetSdkVersion may be CUR_DEVELOPMENT (10000).
   // Note that targetSdkVersion may be 0, meaning "current".
-  Runtime::Current()->SetTargetSdkVersion(target_sdk_version);
+  uint32_t uint_target_sdk_version =
+      target_sdk_version <= 0 ? static_cast<uint32_t>(SdkVersion::kUnset)
+                              : static_cast<uint32_t>(target_sdk_version);
+  Runtime::Current()->SetTargetSdkVersion(uint_target_sdk_version);
 
 #ifdef ART_TARGET_ANDROID
   // This part is letting libc/dynamic linker know about current app's
   // target sdk version to enable compatibility workarounds.
-  android_set_application_target_sdk_version(static_cast<uint32_t>(target_sdk_version));
+  android_set_application_target_sdk_version(uint_target_sdk_version);
 #endif
 }
 
-static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeAllocationInternal(JNIEnv* env, jobject, jint bytes) {
   if (UNLIKELY(bytes < 0)) {
     ScopedObjectAccess soa(env);
     ThrowRuntimeException("allocation size negative %d", bytes);
@@ -277,11 +280,7 @@
   Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
 }
 
-static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
-  Runtime::Current()->RegisterSensitiveThread();
-}
-
-static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeFreeInternal(JNIEnv* env, jobject, jint bytes) {
   if (UNLIKELY(bytes < 0)) {
     ScopedObjectAccess soa(env);
     ThrowRuntimeException("allocation size negative %d", bytes);
@@ -290,6 +289,18 @@
   Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
 }
 
+static jint VMRuntime_getNotifyNativeInterval(JNIEnv*, jclass) {
+  return Runtime::Current()->GetHeap()->GetNotifyNativeInterval();
+}
+
+static void VMRuntime_notifyNativeAllocationsInternal(JNIEnv* env, jobject) {
+  Runtime::Current()->GetHeap()->NotifyNativeAllocations(env);
+}
+
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+  Runtime::Current()->RegisterSensitiveThread();
+}
+
 static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
   Runtime* runtime = Runtime::Current();
   runtime->UpdateProcessState(static_cast<ProcessState>(process_state));
@@ -325,7 +336,7 @@
   Runtime::Current()->GetHeap()->GetTaskProcessor()->RunAllTasks(ThreadForEnv(env));
 }
 
-typedef std::map<std::string, ObjPtr<mirror::String>> StringTable;
+using StringTable = std::map<std::string, ObjPtr<mirror::String>>;
 
 class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
  public:
@@ -374,7 +385,7 @@
   const char* class_name = dex_file->StringByTypeIdx(type_idx);
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   ObjPtr<mirror::Class> klass = (class_name[1] == '\0')
-      ? linker->FindPrimitiveClass(class_name[0])
+      ? linker->LookupPrimitiveClass(class_name[0])
       : linker->LookupClass(self, class_name, nullptr);
   if (klass == nullptr) {
     return;
@@ -402,9 +413,9 @@
     return;  // The entry already contains some ArtField.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file->GetFieldId(field_idx);
   ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
-      field_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+      field_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
   if (klass == nullptr) {
     return;
   }
@@ -428,16 +439,16 @@
     return;  // The entry already contains some ArtMethod.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+  const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType(
-      method_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+      method_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
   if (klass == nullptr) {
     return;
   }
   // Call FindResolvedMethod to populate the dex cache.
-  class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader */ nullptr, method_idx);
+  class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader= */ nullptr, method_idx);
 }
 
 struct DexCacheStats {
@@ -682,6 +693,11 @@
   Runtime::Current()->SetProcessPackageName(package_name.c_str());
 }
 
+static jboolean VMRuntime_hasBootImageSpaces(JNIEnv* env ATTRIBUTE_UNUSED,
+                                             jclass klass ATTRIBUTE_UNUSED) {
+  return Runtime::Current()->GetHeap()->HasBootImageSpace() ? JNI_TRUE : JNI_FALSE;
+}
+
 static JNINativeMethod gMethods[] = {
   FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
   NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -690,7 +706,7 @@
   NATIVE_METHOD(VMRuntime, clearGrowthLimit, "()V"),
   NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
   NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
-  NATIVE_METHOD(VMRuntime, hasUsedHiddenApi, "()Z"),
+  FAST_NATIVE_METHOD(VMRuntime, hasBootImageSpaces, "()Z"),  // Could be CRITICAL.
   NATIVE_METHOD(VMRuntime, setHiddenApiExemptions, "([Ljava/lang/String;)V"),
   NATIVE_METHOD(VMRuntime, setHiddenApiAccessLogSamplingRate, "(I)V"),
   NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
@@ -702,9 +718,11 @@
   FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
-  NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeAllocationInternal, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeFreeInternal, "(I)V"),
+  NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"),
+  NATIVE_METHOD(VMRuntime, notifyNativeAllocationsInternal, "()V"),
   NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
-  NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
   NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
   NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
   NATIVE_METHOD(VMRuntime, runHeapTasks, "()V"),
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 3919227..32733a8 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -59,7 +59,7 @@
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
     Thread* thread = thread_list->SuspendThreadByPeer(peer,
-                                                      /* request_suspension */ true,
+                                                      /* request_suspension= */ true,
                                                       SuspendReason::kInternal,
                                                       &timed_out);
     if (thread != nullptr) {
@@ -113,7 +113,7 @@
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         class_loader(nullptr) {}
 
-    bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       DCHECK(class_loader == nullptr);
       ObjPtr<mirror::Class> c = GetMethod()->GetDeclaringClass();
       // c is null for runtime methods.
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 72dae47..26fc5e9 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -29,6 +29,7 @@
 #include "debugger.h"
 #include "hidden_api.h"
 #include "jit/jit.h"
+#include "jit/jit_code_cache.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
 #include "native_util.h"
@@ -43,10 +44,6 @@
 #include "thread_list.h"
 #include "trace.h"
 
-#if defined(__linux__)
-#include <sys/prctl.h>
-#endif
-
 #include <sys/resource.h>
 
 namespace art {
@@ -58,33 +55,6 @@
 
 using android::base::StringPrintf;
 
-static void EnableDebugger() {
-#if defined(__linux__)
-  // To let a non-privileged gdbserver attach to this
-  // process, we must set our dumpable flag.
-  if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) {
-    PLOG(ERROR) << "prctl(PR_SET_DUMPABLE) failed for pid " << getpid();
-  }
-
-  // Even if Yama is on a non-privileged native debugger should
-  // be able to attach to the debuggable app.
-  if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == -1) {
-    // if Yama is off prctl(PR_SET_PTRACER) returns EINVAL - don't log in this
-    // case since it's expected behaviour.
-    if (errno != EINVAL) {
-      PLOG(ERROR) << "prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) failed for pid " << getpid();
-    }
-  }
-#endif
-  // We don't want core dumps, though, so set the core dump size to 0.
-  rlimit rl;
-  rl.rlim_cur = 0;
-  rl.rlim_max = RLIM_INFINITY;
-  if (setrlimit(RLIMIT_CORE, &rl) == -1) {
-    PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid();
-  }
-}
-
 class ClassSet {
  public:
   // The number of classes we reasonably expect to have to look at. Realistically the number is more
@@ -152,7 +122,8 @@
     // Drop the shared mutator lock.
     ScopedThreadSuspension sts(self, art::ThreadState::kNative);
     // Get exclusive mutator lock with suspend all.
-    ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+    ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!",
+                             /*long_suspend=*/false);
     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
     runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
   }
@@ -205,9 +176,6 @@
   }
 
   Dbg::SetJdwpAllowed((runtime_flags & DEBUG_ENABLE_JDWP) != 0);
-  if ((runtime_flags & DEBUG_ENABLE_JDWP) != 0) {
-    EnableDebugger();
-  }
   runtime_flags &= ~DEBUG_ENABLE_JDWP;
 
   const bool safe_mode = (runtime_flags & DEBUG_ENABLE_SAFEMODE) != 0;
@@ -224,7 +192,9 @@
   if ((runtime_flags & DEBUG_ALWAYS_JIT) != 0) {
     jit::JitOptions* jit_options = runtime->GetJITOptions();
     CHECK(jit_options != nullptr);
-    jit_options->SetJitAtFirstUse();
+    Runtime::Current()->DoAndMaybeSwitchInterpreter([=]() {
+        jit_options->SetJitAtFirstUse();
+    });
     runtime_flags &= ~DEBUG_ALWAYS_JIT;
   }
 
@@ -233,8 +203,11 @@
     runtime->AddCompilerOption("--debuggable");
     runtime_flags |= DEBUG_GENERATE_MINI_DEBUG_INFO;
     runtime->SetJavaDebuggable(true);
-    // Deoptimize the boot image as it may be non-debuggable.
-    runtime->DeoptimizeBootImage();
+    {
+      // Deoptimize the boot image as it may be non-debuggable.
+      ScopedSuspendAll ssa(__FUNCTION__);
+      runtime->DeoptimizeBootImage();
+    }
     runtime_flags &= ~DEBUG_JAVA_DEBUGGABLE;
     needs_non_debuggable_classes = true;
   }
@@ -270,15 +243,29 @@
 
   runtime->PreZygoteFork();
 
-  if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
-    // Tracing active, pause it.
-    Trace::Pause();
-  }
-
   // Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
   return reinterpret_cast<jlong>(ThreadForEnv(env));
 }
 
+static void ZygoteHooks_nativePostZygoteFork(JNIEnv*, jclass) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsZygote()) {
+    runtime->PostZygoteFork();
+  }
+}
+
+static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                   jclass klass ATTRIBUTE_UNUSED) {
+  // This JIT code cache for system server is created whilst the runtime is still single threaded.
+  // System server has a window where it can create executable pages for this purpose, but this is
+  // turned off after this hook. Consequently, the only JIT mode supported is the dual-view JIT
+  // where one mapping is R->RW and the other is RX. Single view requires RX->RWX->RX.
+  if (Runtime::Current()->GetJit() != nullptr) {
+    Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+        /* is_system_server= */ true, /* is_zygote= */ false);
+  }
+}
+
 static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
                                             jclass,
                                             jlong token,
@@ -292,8 +279,7 @@
   // Our system thread ID, etc, has changed so reset Thread state.
   thread->InitAfterFork();
   runtime_flags = EnableDebugFeatures(runtime_flags);
-  hiddenapi::EnforcementPolicy api_enforcement_policy = hiddenapi::EnforcementPolicy::kNoChecks;
-  bool dedupe_hidden_api_warnings = true;
+  hiddenapi::EnforcementPolicy api_enforcement_policy = hiddenapi::EnforcementPolicy::kDisabled;
 
   if ((runtime_flags & DISABLE_VERIFIER) != 0) {
     Runtime::Current()->DisableVerifier();
@@ -317,6 +303,15 @@
   }
 
   Runtime::Current()->GetHeap()->PostForkChildAction(thread);
+  if (Runtime::Current()->GetJit() != nullptr) {
+    if (!is_system_server) {
+      // System server already called the JIT cache post fork action in `nativePostForkSystemServer`.
+      Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+          /* is_system_server= */ false, is_zygote);
+    }
+    // This must be called after EnableDebugFeatures.
+    Runtime::Current()->GetJit()->PostForkChildAction(is_zygote);
+  }
 
   // Update tracing.
   if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
@@ -360,14 +355,14 @@
     }
   }
 
-  bool do_hidden_api_checks = api_enforcement_policy != hiddenapi::EnforcementPolicy::kNoChecks;
+  bool do_hidden_api_checks = api_enforcement_policy != hiddenapi::EnforcementPolicy::kDisabled;
   DCHECK(!(is_system_server && do_hidden_api_checks))
       << "SystemServer should be forked with EnforcementPolicy::kDisable";
   DCHECK(!(is_zygote && do_hidden_api_checks))
       << "Child zygote processes should be forked with EnforcementPolicy::kDisable";
   Runtime::Current()->SetHiddenApiEnforcementPolicy(api_enforcement_policy);
-  Runtime::Current()->SetDedupeHiddenApiWarnings(dedupe_hidden_api_warnings);
-  if (api_enforcement_policy != hiddenapi::EnforcementPolicy::kNoChecks &&
+  Runtime::Current()->SetDedupeHiddenApiWarnings(true);
+  if (api_enforcement_policy != hiddenapi::EnforcementPolicy::kDisabled &&
       Runtime::Current()->GetHiddenApiEventLogSampleRate() != 0) {
     // Hidden API checks are enabled, and we are sampling access for the event log. Initialize the
     // random seed, to ensure the sampling is actually random. We do this post-fork, as doing it
@@ -375,9 +370,6 @@
     std::srand(static_cast<uint32_t>(NanoTime()));
   }
 
-  // Clear the hidden API warning flag, in case it was set.
-  Runtime::Current()->SetPendingHiddenApiWarning(false);
-
   if (is_zygote) {
     // If creating a child-zygote, do not call into the runtime's post-fork logic.
     // Doing so would spin up threads for Binder and JDWP. Instead, the Java side
@@ -399,7 +391,7 @@
         env,
         is_system_server,
         Runtime::NativeBridgeAction::kUnload,
-        /*isa*/ nullptr,
+        /*isa=*/ nullptr,
         profile_system_server);
   }
 }
@@ -416,6 +408,8 @@
 
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
+  NATIVE_METHOD(ZygoteHooks, nativePostZygoteFork, "()V"),
+  NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "()V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZZLjava/lang/String;)V"),
   NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
   NATIVE_METHOD(ZygoteHooks, stopZygoteNoThreadCreation, "()V"),
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5a5fb16..db62475 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -30,13 +30,16 @@
 #include "dex/utf.h"
 #include "hidden_api.h"
 #include "jni/jni_internal.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/field-inl.h"
 #include "mirror/method.h"
 #include "mirror/method_handles_lookup.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
 #include "native_util.h"
 #include "nativehelper/jni_macros.h"
@@ -51,18 +54,19 @@
 
 namespace art {
 
-// Returns true if the first caller outside of the Class class or java.lang.invoke package
-// is in a platform DEX file.
-static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Walk the stack and find the first frame not from java.lang.Class and not from java.lang.invoke.
-  // This is very expensive. Save this till the last.
+// Walks the stack, finds the caller of this reflective call and returns
+// a hiddenapi AccessContext formed from its declaring class.
+static hiddenapi::AccessContext GetReflectionCaller(Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Walk the stack and find the first frame not from java.lang.Class and not
+  // from java.lang.invoke. This is very expensive. Save this till the last.
   struct FirstExternalCallerVisitor : public StackVisitor {
     explicit FirstExternalCallerVisitor(Thread* thread)
         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           caller(nullptr) {
     }
 
-    bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod *m = GetMethod();
       if (m == nullptr) {
         // Attached native thread. Assume this is *not* boot class path.
@@ -99,50 +103,45 @@
 
   FirstExternalCallerVisitor visitor(self);
   visitor.WalkStack();
-  return visitor.caller != nullptr &&
-         hiddenapi::IsCallerTrusted(visitor.caller->GetDeclaringClass());
+
+  // Construct AccessContext from the calling class found on the stack.
+  // If the calling class cannot be determined, e.g. unattached threads,
+  // we conservatively assume the caller is trusted.
+  ObjPtr<mirror::Class> caller = (visitor.caller == nullptr)
+      ? nullptr : visitor.caller->GetDeclaringClass();
+  return caller.IsNull() ? hiddenapi::AccessContext(/* is_trusted= */ true)
+                         : hiddenapi::AccessContext(caller);
 }
 
-// Returns true if the first non-ClassClass caller up the stack is not allowed to
-// access hidden APIs. This can be *very* expensive. Never call this in a loop.
-ALWAYS_INLINE static bool ShouldEnforceHiddenApi(Thread* self)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  hiddenapi::EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy();
-  return policy != hiddenapi::EnforcementPolicy::kNoChecks && !IsCallerTrusted(self);
+static std::function<hiddenapi::AccessContext()> GetHiddenapiAccessContextFunction(Thread* self) {
+  return [=]() REQUIRES_SHARED(Locks::mutator_lock_) { return GetReflectionCaller(self); };
 }
 
 // Returns true if the first non-ClassClass caller up the stack should not be
 // allowed access to `member`.
 template<typename T>
-ALWAYS_INLINE static bool ShouldBlockAccessToMember(T* member, Thread* self)
+ALWAYS_INLINE static bool ShouldDenyAccessToMember(T* member, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  hiddenapi::Action action = hiddenapi::GetMemberAction(
-      member, self, IsCallerTrusted, hiddenapi::kReflection);
-  if (action != hiddenapi::kAllow) {
-    hiddenapi::NotifyHiddenApiListener(member);
-  }
-
-  return action == hiddenapi::kDeny;
+  return hiddenapi::ShouldDenyAccessToMember(member,
+                                             GetHiddenapiAccessContextFunction(self),
+                                             hiddenapi::AccessMethod::kReflection);
 }
 
 // Returns true if a class member should be discoverable with reflection given
 // the criteria. Some reflection calls only return public members
 // (public_only == true), some members should be hidden from non-boot class path
-// callers (enforce_hidden_api == true).
+// callers (hiddenapi_context).
 template<typename T>
 ALWAYS_INLINE static bool IsDiscoverable(bool public_only,
-                                         bool enforce_hidden_api,
+                                         const hiddenapi::AccessContext& access_context,
                                          T* member)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (public_only && ((member->GetAccessFlags() & kAccPublic) == 0)) {
     return false;
   }
 
-  return hiddenapi::GetMemberAction(member,
-                                    nullptr,
-                                    [enforce_hidden_api] (Thread*) { return !enforce_hidden_api; },
-                                    hiddenapi::kNone)
-      != hiddenapi::kDeny;
+  return !hiddenapi::ShouldDenyAccessToMember(
+      member, access_context, hiddenapi::AccessMethod::kNone);
 }
 
 ALWAYS_INLINE static inline ObjPtr<mirror::Class> DecodeClass(
@@ -224,7 +223,7 @@
     return soa.AddLocalReference<jobjectArray>(klass->GetProxyInterfaces()->Clone(soa.Self()));
   }
 
-  const DexFile::TypeList* iface_list = klass->GetInterfaceTypeList();
+  const dex::TypeList* iface_list = klass->GetInterfaceTypeList();
   if (iface_list == nullptr) {
     return nullptr;
   }
@@ -263,15 +262,15 @@
   IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
   IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
   size_t array_size = klass->NumInstanceFields() + klass->NumStaticFields();
-  bool enforce_hidden_api = ShouldEnforceHiddenApi(self);
+  hiddenapi::AccessContext hiddenapi_context = GetReflectionCaller(self);
   // Lets go subtract all the non discoverable fields.
   for (ArtField& field : ifields) {
-    if (!IsDiscoverable(public_only, enforce_hidden_api, &field)) {
+    if (!IsDiscoverable(public_only, hiddenapi_context, &field)) {
       --array_size;
     }
   }
   for (ArtField& field : sfields) {
-    if (!IsDiscoverable(public_only, enforce_hidden_api, &field)) {
+    if (!IsDiscoverable(public_only, hiddenapi_context, &field)) {
       --array_size;
     }
   }
@@ -282,7 +281,7 @@
     return nullptr;
   }
   for (ArtField& field : ifields) {
-    if (IsDiscoverable(public_only, enforce_hidden_api, &field)) {
+    if (IsDiscoverable(public_only, hiddenapi_context, &field)) {
       auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
                                                                                    &field,
                                                                                    force_resolve);
@@ -297,7 +296,7 @@
     }
   }
   for (ArtField& field : sfields) {
-    if (IsDiscoverable(public_only, enforce_hidden_api, &field)) {
+    if (IsDiscoverable(public_only, hiddenapi_context, &field)) {
       auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
                                                                                    &field,
                                                                                    force_resolve);
@@ -456,8 +455,7 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Field> field = hs.NewHandle(GetPublicFieldRecursive(
       soa.Self(), DecodeClass(soa, javaThis), name_string));
-  if (field.Get() == nullptr ||
-      ShouldBlockAccessToMember(field->GetArtField(), soa.Self())) {
+  if (field.Get() == nullptr || ShouldDenyAccessToMember(field->GetArtField(), soa.Self())) {
     return nullptr;
   }
   return soa.AddLocalReference<jobject>(field.Get());
@@ -474,7 +472,7 @@
   Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
   Handle<mirror::Field> result =
       hs.NewHandle(GetDeclaredField(soa.Self(), h_klass.Get(), h_string.Get()));
-  if (result == nullptr || ShouldBlockAccessToMember(result->GetArtField(), soa.Self())) {
+  if (result == nullptr || ShouldDenyAccessToMember(result->GetArtField(), soa.Self())) {
     std::string name_str = h_string->ToModifiedUtf8();
     if (name_str == "value" && h_klass->IsStringClass()) {
       // We log the error for this specific case, as the user might just swallow the exception.
@@ -506,19 +504,20 @@
       soa.Self(),
       DecodeClass(soa, javaThis),
       soa.Decode<mirror::ObjectArray<mirror::Class>>(args)));
-  if (result == nullptr || ShouldBlockAccessToMember(result->GetArtMethod(), soa.Self())) {
+  if (result == nullptr || ShouldDenyAccessToMember(result->GetArtMethod(), soa.Self())) {
     return nullptr;
   }
   return soa.AddLocalReference<jobject>(result.Get());
 }
 
 static ALWAYS_INLINE inline bool MethodMatchesConstructor(
-    ArtMethod* m, bool public_only, bool enforce_hidden_api)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArtMethod* m,
+    bool public_only,
+    const hiddenapi::AccessContext& hiddenapi_context) REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(m != nullptr);
   return m->IsConstructor() &&
          !m->IsStatic() &&
-         IsDiscoverable(public_only, enforce_hidden_api, m);
+         IsDiscoverable(public_only, hiddenapi_context, m);
 }
 
 static jobjectArray Class_getDeclaredConstructorsInternal(
@@ -526,12 +525,12 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   bool public_only = (publicOnly != JNI_FALSE);
-  bool enforce_hidden_api = ShouldEnforceHiddenApi(soa.Self());
+  hiddenapi::AccessContext hiddenapi_context = GetReflectionCaller(soa.Self());
   Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
   size_t constructor_count = 0;
   // Two pass approach for speed.
   for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
-    constructor_count += MethodMatchesConstructor(&m, public_only, enforce_hidden_api) ? 1u : 0u;
+    constructor_count += MethodMatchesConstructor(&m, public_only, hiddenapi_context) ? 1u : 0u;
   }
   auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
       soa.Self(), GetClassRoot<mirror::ObjectArray<mirror::Constructor>>(), constructor_count));
@@ -541,7 +540,7 @@
   }
   constructor_count = 0;
   for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
-    if (MethodMatchesConstructor(&m, public_only, enforce_hidden_api)) {
+    if (MethodMatchesConstructor(&m, public_only, hiddenapi_context)) {
       DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
       DCHECK(!Runtime::Current()->IsActiveTransaction());
       ObjPtr<mirror::Constructor> constructor =
@@ -567,8 +566,9 @@
           soa.Self(),
           DecodeClass(soa, javaThis),
           soa.Decode<mirror::String>(name),
-          soa.Decode<mirror::ObjectArray<mirror::Class>>(args)));
-  if (result == nullptr || ShouldBlockAccessToMember(result->GetArtMethod(), soa.Self())) {
+          soa.Decode<mirror::ObjectArray<mirror::Class>>(args),
+          GetHiddenapiAccessContextFunction(soa.Self())));
+  if (result == nullptr || ShouldDenyAccessToMember(result->GetArtMethod(), soa.Self())) {
     return nullptr;
   }
   return soa.AddLocalReference<jobject>(result.Get());
@@ -579,7 +579,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
 
-  bool enforce_hidden_api = ShouldEnforceHiddenApi(soa.Self());
+  hiddenapi::AccessContext hiddenapi_context = GetReflectionCaller(soa.Self());
   bool public_only = (publicOnly != JNI_FALSE);
 
   Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
@@ -588,7 +588,7 @@
     uint32_t modifiers = m.GetAccessFlags();
     // Add non-constructor declared methods.
     if ((modifiers & kAccConstructor) == 0 &&
-        IsDiscoverable(public_only, enforce_hidden_api, &m)) {
+        IsDiscoverable(public_only, hiddenapi_context, &m)) {
       ++num_methods;
     }
   }
@@ -602,7 +602,7 @@
   for (ArtMethod& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
     uint32_t modifiers = m.GetAccessFlags();
     if ((modifiers & kAccConstructor) == 0 &&
-        IsDiscoverable(public_only, enforce_hidden_api, &m)) {
+        IsDiscoverable(public_only, hiddenapi_context, &m)) {
       DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
       DCHECK(!Runtime::Current()->IsActiveTransaction());
       ObjPtr<mirror::Method> method =
@@ -647,7 +647,7 @@
     ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
         mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
                                                    annotation_array_class,
-                                                   /* length */ 0);
+                                                   /* length= */ 0);
     return soa.AddLocalReference<jobjectArray>(empty_array);
   }
   return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass));
@@ -816,7 +816,7 @@
       soa.Self(),
       ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
       kRuntimePointerSize);
-  if (UNLIKELY(constructor == nullptr) || ShouldBlockAccessToMember(constructor, soa.Self())) {
+  if (UNLIKELY(constructor == nullptr) || ShouldDenyAccessToMember(constructor, soa.Self())) {
     soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
                                    "%s has no zero argument constructor",
                                    klass->PrettyClass().c_str());
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 8976058..4be2086 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -19,11 +19,12 @@
 #include "nativehelper/jni_macros.h"
 
 #include "common_throws.h"
+#include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/array.h"
 #include "mirror/object-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
-#include "mirror/string.h"
 #include "native_util.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 3978ca8..13f8d5b 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -17,9 +17,10 @@
 #include "java_lang_StringFactory.h"
 
 #include "common_throws.h"
+#include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/object-inl.h"
-#include "mirror/string-inl.h"
+#include "mirror/string-alloc-inl.h"
 #include "native_util.h"
 #include "nativehelper/jni_macros.h"
 #include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 2c4184c..e4bc8ce 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -101,32 +101,36 @@
       case Primitive::kPrimBoolean:
       case Primitive::kPrimByte:
         DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 1U);
-        dstArray->AsByteSizedArray()->Memmove(dstPos, srcArray->AsByteSizedArray(), srcPos, count);
+        // Note: Treating BooleanArray as ByteArray.
+        ObjPtr<mirror::ByteArray>::DownCast(dstArray)->Memmove(
+            dstPos, ObjPtr<mirror::ByteArray>::DownCast(srcArray), srcPos, count);
         return;
       case Primitive::kPrimChar:
       case Primitive::kPrimShort:
         DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 2U);
-        dstArray->AsShortSizedArray()->Memmove(dstPos, srcArray->AsShortSizedArray(), srcPos, count);
+        // Note: Treating CharArray as ShortArray.
+        ObjPtr<mirror::ShortArray>::DownCast(dstArray)->Memmove(
+            dstPos, ObjPtr<mirror::ShortArray>::DownCast(srcArray), srcPos, count);
         return;
       case Primitive::kPrimInt:
-        DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
-        dstArray->AsIntArray()->Memmove(dstPos, srcArray->AsIntArray(), srcPos, count);
-        return;
       case Primitive::kPrimFloat:
         DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
-        dstArray->AsFloatArray()->Memmove(dstPos, srcArray->AsFloatArray(), srcPos, count);
+        // Note: Treating FloatArray as IntArray.
+        ObjPtr<mirror::IntArray>::DownCast(dstArray)->Memmove(
+            dstPos, ObjPtr<mirror::IntArray>::DownCast(srcArray), srcPos, count);
         return;
       case Primitive::kPrimLong:
-        DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
-        dstArray->AsLongArray()->Memmove(dstPos, srcArray->AsLongArray(), srcPos, count);
-        return;
       case Primitive::kPrimDouble:
         DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
-        dstArray->AsDoubleArray()->Memmove(dstPos, srcArray->AsDoubleArray(), srcPos, count);
+        // Note: Treating DoubleArray as LongArray.
+        ObjPtr<mirror::LongArray>::DownCast(dstArray)->Memmove(
+            dstPos, ObjPtr<mirror::LongArray>::DownCast(srcArray), srcPos, count);
         return;
       case Primitive::kPrimNot: {
-        mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
-        mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
+        mirror::ObjectArray<mirror::Object>* dstObjArray =
+            dstArray->AsObjectArray<mirror::Object>();
+        mirror::ObjectArray<mirror::Object>* srcObjArray =
+            srcArray->AsObjectArray<mirror::Object>();
         dstObjArray->AssignableMemmove(dstPos, srcObjArray, srcPos, count);
         return;
       }
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b7f0a7a..67ad0a4 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -147,7 +147,7 @@
   bool timed_out;
   // Take suspend thread lock to avoid races with threads trying to suspend this one.
   Thread* thread = thread_list->SuspendThreadByPeer(peer,
-                                                    /* request_suspension */ true,
+                                                    /* request_suspension= */ true,
                                                     SuspendReason::kInternal,
                                                     &timed_out);
   if (thread != nullptr) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 1ad233a..46162c1 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -21,6 +21,7 @@
 #include "dex/descriptors_names.h"
 #include "dex/dex_file_loader.h"
 #include "dex/utf.h"
+#include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
index 1f2bf09..0b26bd7 100644
--- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -48,7 +48,7 @@
   if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) {
     ArtField* const field = handle->GetTargetField();
     h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
-        soa.Self(), field, false /* force_resolve */));
+        soa.Self(), field, /* force_resolve= */ false));
   } else {
     ArtMethod* const method = handle->GetTargetMethod();
     if (method->IsConstructor()) {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 452a66d..ff94593 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -24,6 +24,7 @@
 #include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object-inl.h"
 #include "native_util.h"
 #include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 4b4d6e3..337c084 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -24,8 +24,10 @@
 #include "class_root.h"
 #include "dex/dex_file_annotations.h"
 #include "jni/jni_internal.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/method.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object-inl.h"
 #include "native_util.h"
 #include "reflection.h"
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index a10db91..2ce56b5 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -24,9 +24,11 @@
 #include "dex/dex_file_annotations.h"
 #include "handle.h"
 #include "jni/jni_internal.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "native_util.h"
 #include "reflection.h"
@@ -273,8 +275,8 @@
   this_method = this_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
   other_method = other_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
 
-  const DexFile::TypeList* this_list = this_method->GetParameterTypeList();
-  const DexFile::TypeList* other_list = other_method->GetParameterTypeList();
+  const dex::TypeList* this_list = this_method->GetParameterTypeList();
+  const dex::TypeList* other_list = other_method->GetParameterTypeList();
 
   if (this_list == other_list) {
     return 0;
@@ -296,9 +298,9 @@
   }
 
   for (int32_t i = 0; i < this_size; ++i) {
-    const DexFile::TypeId& lhs = this_method->GetDexFile()->GetTypeId(
+    const dex::TypeId& lhs = this_method->GetDexFile()->GetTypeId(
         this_list->GetTypeItem(i).type_idx_);
-    const DexFile::TypeId& rhs = other_method->GetDexFile()->GetTypeId(
+    const dex::TypeId& rhs = other_method->GetDexFile()->GetTypeId(
         other_list->GetTypeItem(i).type_idx_);
 
     uint32_t lhs_len, rhs_len;
@@ -341,7 +343,7 @@
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
   method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
 
-  const DexFile::TypeList* params = method->GetParameterTypeList();
+  const dex::TypeList* params = method->GetParameterTypeList();
   if (params == nullptr) {
     return nullptr;
   }
@@ -376,7 +378,7 @@
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
   method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
 
-  const DexFile::TypeList* params = method->GetParameterTypeList();
+  const dex::TypeList* params = method->GetParameterTypeList();
   return (params == nullptr) ? 0 : params->Size();
 }
 
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 895b2f9..f21ded9 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -30,6 +30,7 @@
 #include "jvalue-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/field-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "native_util.h"
 #include "reflection-inl.h"
 #include "scoped_fast_native_object_access-inl.h"
@@ -333,14 +334,15 @@
     return;
   }
   ObjPtr<mirror::Class> field_type;
-  const char* field_type_desciptor = f->GetArtField()->GetTypeDescriptor();
-  Primitive::Type field_prim_type = Primitive::GetType(field_type_desciptor[0]);
+  const char* field_type_descriptor = f->GetArtField()->GetTypeDescriptor();
+  Primitive::Type field_prim_type = Primitive::GetType(field_type_descriptor[0]);
   if (field_prim_type == Primitive::kPrimNot) {
     field_type = f->GetType();
-    DCHECK(field_type != nullptr);
   } else {
-    field_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(field_type_desciptor[0]);
+    field_type =
+        Runtime::Current()->GetClassLinker()->LookupPrimitiveClass(field_type_descriptor[0]);
   }
+  DCHECK(field_type != nullptr) << field_type_descriptor;
   // We now don't expect suspension unless an exception is thrown.
   // Unbox the value, if necessary.
   ObjPtr<mirror::Object> boxed_value = soa.Decode<mirror::Object>(javaValue);
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 87fda6b..c65541d 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -27,7 +27,7 @@
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "native_util.h"
 #include "reflection.h"
 #include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index 2429804..95e0d79 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -18,6 +18,7 @@
 
 #include <string.h>
 
+#include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/string-inl.h"
 #include "mirror/string.h"
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 4644480..5014f34 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -31,8 +31,10 @@
 #include "mirror/array.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "art_field-inl.h"
 #include "native_util.h"
 #include "scoped_fast_native_object_access-inl.h"
+#include "well_known_classes.h"
 
 namespace art {
 
@@ -74,8 +76,8 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier,
-        /* kAlwaysUpdateField */ true>(
+    ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier,
+        /* kAlwaysUpdateField= */ true>(
         obj.Ptr(),
         MemberOffset(offset),
         field_addr);
@@ -364,13 +366,17 @@
   ObjPtr<mirror::Object> dst = soa.Decode<mirror::Object>(dstObj);
   ObjPtr<mirror::Class> component_type = dst->GetClass()->GetComponentType();
   if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
-    copyToArray(srcAddr, MakeObjPtr(dst->AsByteSizedArray()), dst_offset, sz);
+    // Note: Treating BooleanArray as ByteArray.
+    copyToArray(srcAddr, ObjPtr<mirror::ByteArray>::DownCast(dst), dst_offset, sz);
   } else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
-    copyToArray(srcAddr, MakeObjPtr(dst->AsShortSizedArray()), dst_offset, sz);
+    // Note: Treating CharArray as ShortArray.
+    copyToArray(srcAddr, ObjPtr<mirror::ShortArray>::DownCast(dst), dst_offset, sz);
   } else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
-    copyToArray(srcAddr, MakeObjPtr(dst->AsIntArray()), dst_offset, sz);
+    // Note: Treating FloatArray as IntArray.
+    copyToArray(srcAddr, ObjPtr<mirror::IntArray>::DownCast(dst), dst_offset, sz);
   } else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
-    copyToArray(srcAddr, MakeObjPtr(dst->AsLongArray()), dst_offset, sz);
+    // Note: Treating DoubleArray as LongArray.
+    copyToArray(srcAddr, ObjPtr<mirror::LongArray>::DownCast(dst), dst_offset, sz);
   } else {
     ThrowIllegalAccessException("not a primitive array");
   }
@@ -395,13 +401,17 @@
   ObjPtr<mirror::Object> src = soa.Decode<mirror::Object>(srcObj);
   ObjPtr<mirror::Class> component_type = src->GetClass()->GetComponentType();
   if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
-    copyFromArray(dstAddr, MakeObjPtr(src->AsByteSizedArray()), src_offset, sz);
+    // Note: Treating BooleanArray as ByteArray.
+    copyFromArray(dstAddr, ObjPtr<mirror::ByteArray>::DownCast(src), src_offset, sz);
   } else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
-    copyFromArray(dstAddr, MakeObjPtr(src->AsShortSizedArray()), src_offset, sz);
+    // Note: Treating CharArray as ShortArray.
+    copyFromArray(dstAddr, ObjPtr<mirror::ShortArray>::DownCast(src), src_offset, sz);
   } else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
-    copyFromArray(dstAddr, MakeObjPtr(src->AsIntArray()), src_offset, sz);
+    // Note: Treating FloatArray as IntArray.
+    copyFromArray(dstAddr, ObjPtr<mirror::IntArray>::DownCast(src), src_offset, sz);
   } else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
-    copyFromArray(dstAddr, MakeObjPtr(src->AsLongArray()), src_offset, sz);
+    // Note: Treating DoubleArray as LongArray.
+    copyFromArray(dstAddr, ObjPtr<mirror::LongArray>::DownCast(src), src_offset, sz);
   } else {
     ThrowIllegalAccessException("not a primitive array");
   }
@@ -504,6 +514,33 @@
   std::atomic_thread_fence(std::memory_order_seq_cst);
 }
 
+static void Unsafe_park(JNIEnv* env, jobject, jboolean isAbsolute, jlong time) {
+  ScopedObjectAccess soa(env);
+  Thread::Current()->Park(isAbsolute, time);
+}
+
+static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
+  art::ScopedFastNativeObjectAccess soa(env);
+  if (jthread == nullptr || !env->IsInstanceOf(jthread, WellKnownClasses::java_lang_Thread)) {
+    ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
+    return;
+  }
+  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+  art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+  if (thread != nullptr) {
+    thread->Unpark();
+  } else {
+    // If thread is null, that means that either the thread is not started yet,
+    // or the thread has already terminated. Setting the field to true will be
+    // respected when the thread does start, and is harmless if the thread has
+    // already terminated.
+    ArtField* unparked =
+        jni::DecodeArtField(WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+    // JNI must use non transactional mode.
+    unparked->SetBoolean<false>(soa.Decode<mirror::Object>(jthread), JNI_TRUE);
+  }
+}
+
 static JNINativeMethod gMethods[] = {
   FAST_NATIVE_METHOD(Unsafe, compareAndSwapInt, "(Ljava/lang/Object;JII)Z"),
   FAST_NATIVE_METHOD(Unsafe, compareAndSwapLong, "(Ljava/lang/Object;JJJ)Z"),
@@ -546,6 +583,8 @@
   FAST_NATIVE_METHOD(Unsafe, putShort, "(Ljava/lang/Object;JS)V"),
   FAST_NATIVE_METHOD(Unsafe, putFloat, "(Ljava/lang/Object;JF)V"),
   FAST_NATIVE_METHOD(Unsafe, putDouble, "(Ljava/lang/Object;JD)V"),
+  FAST_NATIVE_METHOD(Unsafe, unpark, "(Ljava/lang/Object;)V"),
+  NATIVE_METHOD(Unsafe, park, "(ZJ)V"),
 
   // Each of the getFoo variants are overloaded with a call that operates
   // directively on a native pointer.
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index ce295aa..a4425ce 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -16,6 +16,7 @@
 
 #include "native_stack_dump.h"
 
+#include <memory>
 #include <ostream>
 
 #include <stdio.h>
@@ -49,7 +50,9 @@
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
+#include "class_linker.h"
 #include "oat_quick_method_header.h"
+#include "runtime.h"
 #include "thread-current-inl.h"
 
 #endif
@@ -62,6 +65,18 @@
 
 static constexpr bool kUseAddr2line = !kIsTargetBuild;
 
+std::string FindAddr2line() {
+  if (!kIsTargetBuild) {
+    constexpr const char* kAddr2linePrebuiltPath =
+      "/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8/bin/x86_64-linux-addr2line";
+    const char* env_value = getenv("ANDROID_BUILD_TOP");
+    if (env_value != nullptr) {
+      return std::string(env_value) + kAddr2linePrebuiltPath;
+    }
+  }
+  return std::string("/usr/bin/addr2line");
+}
+
 ALWAYS_INLINE
 static inline void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
   if (prefix != nullptr) {
@@ -128,10 +143,10 @@
   } else {
     close(caller_to_addr2line[0]);
     close(addr2line_to_caller[1]);
-    return std::unique_ptr<Addr2linePipe>(new Addr2linePipe(addr2line_to_caller[0],
-                                                            caller_to_addr2line[1],
-                                                            name,
-                                                            pid));
+    return std::make_unique<Addr2linePipe>(addr2line_to_caller[0],
+                                           caller_to_addr2line[1],
+                                           name,
+                                           pid);
   }
 }
 
@@ -231,8 +246,9 @@
     }
     pipe->reset();  // Close early.
 
+    std::string addr2linePath = FindAddr2line();
     const char* args[7] = {
-        "/usr/bin/addr2line",
+        addr2linePath.c_str(),
         "--functions",
         "--inlines",
         "--demangle",
@@ -273,11 +289,17 @@
 }
 
 static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFETY_ANALYSIS {
-  uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
-      method->GetEntryPointFromQuickCompiledCode()));
-  if (code == 0) {
+  const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
+  if (entry_point == nullptr) {
     return pc == 0;
   }
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  if (class_linker->IsQuickGenericJniStub(entry_point) ||
+      class_linker->IsQuickResolutionStub(entry_point) ||
+      class_linker->IsQuickToInterpreterBridge(entry_point)) {
+    return false;
+  }
+  uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(entry_point));
   uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetCodeSize();
   return code <= pc && pc <= (code + code_size);
 }
@@ -313,7 +335,7 @@
   if (kUseAddr2line) {
     // Try to run it to see whether we have it. Push an argument so that it doesn't assume a.out
     // and print to stderr.
-    use_addr2line = (gAborting > 0) && RunCommand("addr2line -h");
+    use_addr2line = (gAborting > 0) && RunCommand(FindAddr2line() + " -h");
   } else {
     use_addr2line = false;
   }
@@ -372,7 +394,7 @@
     }
     os << std::endl;
     if (try_addr2line && use_addr2line) {
-      Addr2line(it->map.name, it->pc - it->map.start, os, prefix, &addr2line_state);
+      Addr2line(it->map.name, it->rel_pc, os, prefix, &addr2line_state);
     }
   }
 
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index f42a2d6..8b6c2ed 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -16,6 +16,7 @@
 
 #include "non_debuggable_classes.h"
 
+#include "jni/jni_env_ext.h"
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
 #include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
index e1b5633..e2c51e6 100644
--- a/runtime/non_debuggable_classes.h
+++ b/runtime/non_debuggable_classes.h
@@ -19,7 +19,7 @@
 
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 
 namespace art {
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 496a6f3..439f485 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -31,11 +31,6 @@
 
   void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
 
-  // This is only used by compilers which need to be able to run without relocation even when it
-  // would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
-  // to disable the relocation since both deal with writing out the images directly.
-  bool IsRelocationPossible() override { return false; }
-
   verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
 
  private:
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 71c6a82..ffec179 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_NTH_CALLER_VISITOR_H_
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "stack.h"
 
 namespace art {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 519eed7..c6a963a 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -17,10 +17,10 @@
 #include "oat.h"
 
 #include <string.h>
-#include <zlib.h>
 
 #include "android-base/stringprintf.h"
 
+#include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
 #include "base/bit_utils.h"
 #include "base/strlcpy.h"
@@ -68,7 +68,7 @@
                      const InstructionSetFeatures* instruction_set_features,
                      uint32_t dex_file_count,
                      const SafeMap<std::string, std::string>* variable_data)
-    : adler32_checksum_(adler32(0L, Z_NULL, 0)),
+    : oat_checksum_(0u),
       instruction_set_(instruction_set),
       instruction_set_features_bitmap_(instruction_set_features->AsBitmap()),
       dex_file_count_(dex_file_count),
@@ -80,8 +80,7 @@
       quick_generic_jni_trampoline_offset_(0),
       quick_imt_conflict_trampoline_offset_(0),
       quick_resolution_trampoline_offset_(0),
-      quick_to_interpreter_bridge_offset_(0),
-      image_file_location_oat_checksum_(0) {
+      quick_to_interpreter_bridge_offset_(0) {
   // Don't want asserts in header as they would be checked in each file that includes it. But the
   // fields are private, so we check inside a method.
   static_assert(sizeof(magic_) == sizeof(kOatMagic),
@@ -143,47 +142,11 @@
 
 uint32_t OatHeader::GetChecksum() const {
   CHECK(IsValid());
-  return adler32_checksum_;
+  return oat_checksum_;
 }
 
-void OatHeader::UpdateChecksumWithHeaderData() {
-  UpdateChecksum(&instruction_set_, sizeof(instruction_set_));
-  UpdateChecksum(&instruction_set_features_bitmap_, sizeof(instruction_set_features_bitmap_));
-  UpdateChecksum(&dex_file_count_, sizeof(dex_file_count_));
-  UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_));
-
-  // Update checksum for variable data size.
-  UpdateChecksum(&key_value_store_size_, sizeof(key_value_store_size_));
-
-  // Update for data, if existing.
-  if (key_value_store_size_ > 0U) {
-    UpdateChecksum(&key_value_store_, key_value_store_size_);
-  }
-
-  UpdateChecksum(&executable_offset_, sizeof(executable_offset_));
-  UpdateChecksum(&interpreter_to_interpreter_bridge_offset_,
-                 sizeof(interpreter_to_interpreter_bridge_offset_));
-  UpdateChecksum(&interpreter_to_compiled_code_bridge_offset_,
-                 sizeof(interpreter_to_compiled_code_bridge_offset_));
-  UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(jni_dlsym_lookup_offset_));
-  UpdateChecksum(&quick_generic_jni_trampoline_offset_,
-                 sizeof(quick_generic_jni_trampoline_offset_));
-  UpdateChecksum(&quick_imt_conflict_trampoline_offset_,
-                 sizeof(quick_imt_conflict_trampoline_offset_));
-  UpdateChecksum(&quick_resolution_trampoline_offset_,
-                 sizeof(quick_resolution_trampoline_offset_));
-  UpdateChecksum(&quick_to_interpreter_bridge_offset_,
-                 sizeof(quick_to_interpreter_bridge_offset_));
-}
-
-void OatHeader::UpdateChecksum(const void* data, size_t length) {
-  DCHECK(IsValid());
-  if (data != nullptr) {
-    const uint8_t* bytes = reinterpret_cast<const uint8_t*>(data);
-    adler32_checksum_ = adler32(adler32_checksum_, bytes, length);
-  } else {
-    DCHECK_EQ(0U, length);
-  }
+void OatHeader::SetChecksum(uint32_t oat_checksum) {
+  oat_checksum_ = oat_checksum;
 }
 
 InstructionSet OatHeader::GetInstructionSet() const {
@@ -353,16 +316,6 @@
   quick_to_interpreter_bridge_offset_ = offset;
 }
 
-uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
-  CHECK(IsValid());
-  return image_file_location_oat_checksum_;
-}
-
-void OatHeader::SetImageFileLocationOatChecksum(uint32_t image_file_location_oat_checksum) {
-  CHECK(IsValid());
-  image_file_location_oat_checksum_ = image_file_location_oat_checksum;
-}
-
 uint32_t OatHeader::GetKeyValueStoreSize() const {
   CHECK(IsValid());
   return key_value_store_size_;
diff --git a/runtime/oat.h b/runtime/oat.h
index 963725a..88238d9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -19,30 +19,28 @@
 
 #include <vector>
 
-#include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "base/safe_map.h"
 #include "compiler_filter.h"
-#include "dex/dex_file.h"
 
 namespace art {
 
+enum class InstructionSet;
 class InstructionSetFeatures;
 
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Remove PIC option from oat files.
-  static constexpr uint8_t kOatVersion[] = { '1', '6', '2', '\0' };
+  // Last oat version changed reason: Partial boot image.
+  static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
 
-  static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
-  static constexpr const char* kDex2OatHostKey = "dex2oat-host";
   static constexpr const char* kDebuggableKey = "debuggable";
   static constexpr const char* kNativeDebuggableKey = "native-debuggable";
   static constexpr const char* kCompilerFilter = "compiler-filter";
   static constexpr const char* kClassPathKey = "classpath";
   static constexpr const char* kBootClassPathKey = "bootclasspath";
+  static constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums";
   static constexpr const char* kConcurrentCopying = "concurrent-copying";
   static constexpr const char* kCompilationReasonKey = "compilation-reason";
 
@@ -59,8 +57,7 @@
   std::string GetValidationErrorMessage() const;
   const char* GetMagic() const;
   uint32_t GetChecksum() const;
-  void UpdateChecksumWithHeaderData();
-  void UpdateChecksum(const void* data, size_t length);
+  void SetChecksum(uint32_t checksum);
   uint32_t GetDexFileCount() const {
     DCHECK(IsValid());
     return dex_file_count_;
@@ -97,9 +94,6 @@
   InstructionSet GetInstructionSet() const;
   uint32_t GetInstructionSetFeaturesBitmap() const;
 
-  uint32_t GetImageFileLocationOatChecksum() const;
-  void SetImageFileLocationOatChecksum(uint32_t image_file_location_oat_checksum);
-
   uint32_t GetKeyValueStoreSize() const;
   const uint8_t* GetKeyValueStore() const;
   const char* GetStoreValueByKey(const char* key) const;
@@ -126,7 +120,7 @@
 
   uint8_t magic_[4];
   uint8_t version_[4];
-  uint32_t adler32_checksum_;
+  uint32_t oat_checksum_;
 
   InstructionSet instruction_set_;
   uint32_t instruction_set_features_bitmap_;
@@ -141,8 +135,6 @@
   uint32_t quick_resolution_trampoline_offset_;
   uint32_t quick_to_interpreter_bridge_offset_;
 
-  uint32_t image_file_location_oat_checksum_;
-
   uint32_t key_value_store_size_;
   uint8_t key_value_store_[0];  // note variable width data at end
 
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 721fab9..b71c4e8 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -18,6 +18,8 @@
 #define ART_RUNTIME_OAT_FILE_INL_H_
 
 #include "oat_file.h"
+
+#include "base/utils.h"
 #include "oat_quick_method_header.h"
 
 namespace art {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 0579b6e..8a0a1e7 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -33,6 +33,7 @@
 #include "android/dlext.h"
 #endif
 
+#include <android-base/logging.h>
 #include "android-base/stringprintf.h"
 
 #include "art_method.h"
@@ -47,7 +48,9 @@
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "dex/art_dex_file_loader.h"
+#include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
+#include "dex/dex_file_structs.h"
 #include "dex/dex_file_types.h"
 #include "dex/standard_dex_file.h"
 #include "dex/type_lookup_table.h"
@@ -101,7 +104,6 @@
                                   const std::string& vdex_filename,
                                   const std::string& elf_filename,
                                   const std::string& location,
-                                  uint8_t* requested_base,
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
@@ -115,7 +117,6 @@
                                   int oat_fd,
                                   const std::string& vdex_filename,
                                   const std::string& oat_filename,
-                                  uint8_t* requested_base,
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
@@ -156,9 +157,7 @@
                     /*inout*/MemMap* reservation,  // Where to load if not null.
                     /*out*/std::string* error_msg) = 0;
 
-  bool ComputeFields(uint8_t* requested_base,
-                     const std::string& file_path,
-                     std::string* error_msg);
+  bool ComputeFields(const std::string& file_path, std::string* error_msg);
 
   virtual void PreSetup(const std::string& elf_filename) = 0;
 
@@ -187,7 +186,6 @@
                                       const std::string& vdex_filename,
                                       const std::string& elf_filename,
                                       const std::string& location,
-                                      uint8_t* requested_base,
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
@@ -207,7 +205,7 @@
     return nullptr;
   }
 
-  if (!ret->ComputeFields(requested_base, elf_filename, error_msg)) {
+  if (!ret->ComputeFields(elf_filename, error_msg)) {
     return nullptr;
   }
 
@@ -230,7 +228,6 @@
                                       int oat_fd,
                                       const std::string& vdex_location,
                                       const std::string& oat_location,
-                                      uint8_t* requested_base,
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
@@ -248,7 +245,7 @@
     return nullptr;
   }
 
-  if (!ret->ComputeFields(requested_base, oat_location, error_msg)) {
+  if (!ret->ComputeFields(oat_location, error_msg)) {
     return nullptr;
   }
 
@@ -271,11 +268,11 @@
                            std::string* error_msg) {
   vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
                                   vdex_end_ - vdex_begin_,
-                                  vdex_begin_ != nullptr /* mmap_reuse */,
+                                  /*mmap_reuse=*/ vdex_begin_ != nullptr,
                                   vdex_filename,
                                   writable,
                                   low_4gb,
-                                  /* unquicken*/ false,
+                                  /* unquicken=*/ false,
                                   error_msg);
   if (vdex_.get() == nullptr) {
     *error_msg = StringPrintf("Failed to load vdex file '%s' %s",
@@ -299,13 +296,13 @@
     } else {
       vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
                                       vdex_end_ - vdex_begin_,
-                                      vdex_begin_ != nullptr /* mmap_reuse */,
+                                      /*mmap_reuse=*/ vdex_begin_ != nullptr,
                                       vdex_fd,
                                       s.st_size,
                                       vdex_filename,
                                       writable,
                                       low_4gb,
-                                      false /* unquicken */,
+                                      /*unquicken=*/ false,
                                       error_msg);
       if (vdex_.get() == nullptr) {
         *error_msg = "Failed opening vdex file.";
@@ -316,9 +313,7 @@
   return true;
 }
 
-bool OatFileBase::ComputeFields(uint8_t* requested_base,
-                                const std::string& file_path,
-                                std::string* error_msg) {
+bool OatFileBase::ComputeFields(const std::string& file_path, std::string* error_msg) {
   std::string symbol_error_msg;
   begin_ = FindDynamicSymbolAddress("oatdata", &symbol_error_msg);
   if (begin_ == nullptr) {
@@ -327,16 +322,6 @@
                               symbol_error_msg.c_str());
     return false;
   }
-  if (requested_base != nullptr && begin_ != requested_base) {
-    // Host can fail this check. Do not dump there to avoid polluting the output.
-    if (kIsTargetBuild && (kIsDebugBuild || VLOG_IS_ON(oat))) {
-      PrintFileToLog("/proc/self/maps", android::base::LogSeverity::WARNING);
-    }
-    *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
-        "oatdata=%p != expected=%p. See process maps in the log.",
-        begin_, requested_base);
-    return false;
-  }
   end_ = FindDynamicSymbolAddress("oatlastword", &symbol_error_msg);
   if (end_ == nullptr) {
     *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s' %s",
@@ -410,7 +395,7 @@
     return false;
   }
   static_assert(std::is_trivial<T>::value, "T must be a trivial type");
-  typedef __attribute__((__aligned__(1))) T unaligned_type;
+  using unaligned_type __attribute__((__aligned__(1))) = T;
   *value = *reinterpret_cast<const unaligned_type*>(*oat);
   *oat += sizeof(T);
   return true;
@@ -485,6 +470,7 @@
       }
       prev_entry = &entry;
     }
+    CHECK(prev_entry != nullptr);
     CHECK_LT(prev_entry->GetIndex(index_bits), number_of_indexes);
   }
 }
@@ -597,9 +583,9 @@
     const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
     oat += dex_file_location_size;
 
-    std::string dex_file_location = ResolveRelativeEncodedDexLocation(
-        abs_dex_location,
-        std::string(dex_file_location_data, dex_file_location_size));
+    std::string dex_file_location(dex_file_location_data, dex_file_location_size);
+    std::string dex_file_name =
+        ResolveRelativeEncodedDexLocation(abs_dex_location, dex_file_location);
 
     uint32_t dex_file_checksum;
     if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
@@ -649,15 +635,15 @@
         if (zip_fd != -1) {
           loaded = dex_file_loader.OpenZip(zip_fd,
                                            dex_file_location,
-                                           /* verify */ false,
-                                           /* verify_checksum */ false,
+                                           /*verify=*/ false,
+                                           /*verify_checksum=*/ false,
                                            error_msg,
                                            uncompressed_dex_files_.get());
         } else {
-          loaded = dex_file_loader.Open(dex_file_location.c_str(),
+          loaded = dex_file_loader.Open(dex_file_name.c_str(),
                                         dex_file_location,
-                                        /* verify */ false,
-                                        /* verify_checksum */ false,
+                                        /*verify=*/ false,
+                                        /*verify_checksum=*/ false,
                                         error_msg,
                                         uncompressed_dex_files_.get());
         }
@@ -835,7 +821,7 @@
         this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping);
 
     std::string canonical_location =
-        DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str());
+        DexFileLoader::GetDexCanonicalLocation(dex_file_name.c_str());
 
     // Create the OatDexFile and add it to the owning container.
     OatDexFile* oat_dex_file = new OatDexFile(this,
@@ -852,10 +838,10 @@
     oat_dex_files_storage_.push_back(oat_dex_file);
 
     // Add the location and canonical location (if different) to the oat_dex_files_ table.
-    StringPiece key(oat_dex_file->GetDexFileLocation());
+    std::string_view key(oat_dex_file->GetDexFileLocation());
     oat_dex_files_.Put(key, oat_dex_file);
     if (canonical_location != dex_file_location) {
-      StringPiece canonical_key(oat_dex_file->GetCanonicalDexFileLocation());
+      std::string_view canonical_key(oat_dex_file->GetCanonicalDexFileLocation());
       oat_dex_files_.Put(canonical_key, oat_dex_file);
     }
   }
@@ -1057,11 +1043,8 @@
     }
 #ifdef ART_TARGET_ANDROID
     android_dlextinfo extinfo = {};
-    extinfo.flags = ANDROID_DLEXT_FORCE_LOAD |                  // Force-load, don't reuse handle
-                                                                //   (open oat files multiple
-                                                                //    times).
-                    ANDROID_DLEXT_FORCE_FIXED_VADDR;            // Take a non-zero vaddr as absolute
-                                                                //   (non-pic boot image).
+    extinfo.flags = ANDROID_DLEXT_FORCE_LOAD;   // Force-load, don't reuse handle
+                                                //   (open oat files multiple times).
     if (reservation != nullptr) {
       if (!reservation->IsValid()) {
         *error_msg = StringPrintf("Invalid reservation for %s", elf_filename.c_str());
@@ -1323,7 +1306,7 @@
   }
 
   // Complete the setup.
-  if (!oat_file->ComputeFields(/* requested_base */ nullptr, file->GetPath(), error_msg)) {
+  if (!oat_file->ComputeFields(file->GetPath(), error_msg)) {
     return nullptr;
   }
 
@@ -1407,10 +1390,9 @@
                              /*inout*/MemMap* reservation,
                              /*out*/std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  // TODO: rename requested_base to oat_data_begin
   elf_file_.reset(ElfFile::Open(file,
                                 writable,
-                                /*program_header_only*/true,
+                                /*program_header_only=*/ true,
                                 low_4gb,
                                 error_msg));
   if (elf_file_ == nullptr) {
@@ -1428,20 +1410,23 @@
 
 std::string OatFile::ResolveRelativeEncodedDexLocation(
       const char* abs_dex_location, const std::string& rel_dex_location) {
-  // For host, we still do resolution as the rel_dex_location might be absolute
-  // for a target dex (for example /system/foo/foo.apk).
-  if (abs_dex_location != nullptr && (rel_dex_location[0] != '/' || !kIsTargetBuild)) {
-    // Strip :classes<N>.dex used for secondary multidex files.
+  if (abs_dex_location != nullptr) {
     std::string base = DexFileLoader::GetBaseLocation(rel_dex_location);
+    // Strip :classes<N>.dex used for secondary multidex files.
     std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(rel_dex_location);
-
-    // Check if the base is a suffix of the provided abs_dex_location.
-    std::string target_suffix = ((rel_dex_location[0] != '/') ? "/" : "") + base;
-    std::string abs_location(abs_dex_location);
-    if (abs_location.size() > target_suffix.size()) {
-      size_t pos = abs_location.size() - target_suffix.size();
-      if (abs_location.compare(pos, std::string::npos, target_suffix) == 0) {
-        return abs_location + multidex_suffix;
+    if (!kIsTargetBuild) {
+      // For host, we still do resolution as the rel_dex_location might be absolute
+      // for a target dex (for example /system/foo/foo.apk).
+      return std::string(abs_dex_location) + multidex_suffix;
+    } else if (rel_dex_location[0] != '/') {
+      // Check if the base is a suffix of the provided abs_dex_location.
+      std::string target_suffix = ((rel_dex_location[0] != '/') ? "/" : "") + base;
+      std::string abs_location(abs_dex_location);
+      if (abs_location.size() > target_suffix.size()) {
+        size_t pos = abs_location.size() - target_suffix.size();
+        if (abs_location.compare(pos, std::string::npos, target_suffix) == 0) {
+          return abs_location + multidex_suffix;
+        }
       }
     }
   }
@@ -1458,7 +1443,7 @@
                                   const std::string& location,
                                   const char* abs_dex_location,
                                   std::string* error_msg) {
-  std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, false /* executable */));
+  std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, /*executable=*/ false));
   return oat_file->InitializeFromElfFile(zip_fd, elf_file, vdex_file, abs_dex_location, error_msg)
       ? oat_file.release()
       : nullptr;
@@ -1467,7 +1452,6 @@
 OatFile* OatFile::Open(int zip_fd,
                        const std::string& oat_filename,
                        const std::string& oat_location,
-                       uint8_t* requested_base,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
@@ -1494,8 +1478,7 @@
                                                                  vdex_filename,
                                                                  oat_filename,
                                                                  oat_location,
-                                                                 requested_base,
-                                                                 false /* writable */,
+                                                                 /*writable=*/ false,
                                                                  executable,
                                                                  low_4gb,
                                                                  abs_dex_location,
@@ -1524,8 +1507,7 @@
                                                                 vdex_filename,
                                                                 oat_filename,
                                                                 oat_location,
-                                                                requested_base,
-                                                                false /* writable */,
+                                                                /*writable=*/ false,
                                                                 executable,
                                                                 low_4gb,
                                                                 abs_dex_location,
@@ -1538,7 +1520,6 @@
                        int vdex_fd,
                        int oat_fd,
                        const std::string& oat_location,
-                       uint8_t* requested_base,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
@@ -1553,8 +1534,7 @@
                                                                 oat_fd,
                                                                 vdex_location,
                                                                 oat_location,
-                                                                requested_base,
-                                                                false /* writable */,
+                                                                /*writable=*/ false,
                                                                 executable,
                                                                 low_4gb,
                                                                 abs_dex_location,
@@ -1572,11 +1552,11 @@
   return ElfOatFile::OpenElfFile(zip_fd,
                                  file,
                                  location,
-                                 /* writable */ true,
-                                 /* executable */ false,
-                                 /*low_4gb*/false,
+                                 /*writable=*/ true,
+                                 /*executable=*/ false,
+                                 /*low_4gb=*/false,
                                  abs_dex_location,
-                                 /* reservation */ nullptr,
+                                 /*reservation=*/ nullptr,
                                  error_msg);
 }
 
@@ -1589,11 +1569,11 @@
   return ElfOatFile::OpenElfFile(zip_fd,
                                  file,
                                  location,
-                                 /* writable */ false,
-                                 /* executable */ false,
-                                 /*low_4gb*/false,
+                                 /*writable=*/ false,
+                                 /*executable=*/ false,
+                                 /*low_4gb=*/false,
                                  abs_dex_location,
-                                 /* reservation */ nullptr,
+                                 /*reservation=*/ nullptr,
                                  error_msg);
 }
 
@@ -1684,7 +1664,7 @@
   // without any performance loss, for example by not doing the first lock-free lookup.
 
   const OatDexFile* oat_dex_file = nullptr;
-  StringPiece key(dex_location);
+  std::string_view key(dex_location);
   // Try to find the key cheaply in the oat_dex_files_ map which holds dex locations
   // directly mentioned in the oat file and doesn't require locking.
   auto primary_it = oat_dex_files_.find(key);
@@ -1703,7 +1683,7 @@
       // We haven't seen this dex_location before, we must check the canonical location.
       std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location);
       if (dex_canonical_location != dex_location) {
-        StringPiece canonical_key(dex_canonical_location);
+        std::string_view canonical_key(dex_canonical_location);
         auto canonical_it = oat_dex_files_.find(canonical_key);
         if (canonical_it != oat_dex_files_.end()) {
           oat_dex_file = canonical_it->second;
@@ -1712,7 +1692,7 @@
 
       // Copy the key to the string_cache_ and store the result in secondary map.
       string_cache_.emplace_back(key.data(), key.length());
-      StringPiece key_copy(string_cache_.back());
+      std::string_view key_copy(string_cache_.back());
       secondary_oat_dex_files_.PutBefore(secondary_lb, key_copy, oat_dex_file);
     }
   }
@@ -1782,11 +1762,15 @@
   }
 }
 
-OatDexFile::OatDexFile(TypeLookupTable&& lookup_table) : lookup_table_(std::move(lookup_table)) {}
+OatDexFile::OatDexFile(TypeLookupTable&& lookup_table) : lookup_table_(std::move(lookup_table)) {
+  // Stripped-down OatDexFile only allowed in the compiler.
+  CHECK(Runtime::Current() == nullptr || Runtime::Current()->IsAotCompiler());
+}
 
 OatDexFile::~OatDexFile() {}
 
 size_t OatDexFile::FileSize() const {
+  DCHECK(dex_file_pointer_ != nullptr);
   return reinterpret_cast<const DexFile::Header*>(dex_file_pointer_)->file_size_;
 }
 
@@ -1806,6 +1790,7 @@
 }
 
 uint32_t OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
+  DCHECK(oat_class_offsets_pointer_ != nullptr);
   return oat_class_offsets_pointer_[class_def_index];
 }
 
@@ -1851,13 +1836,23 @@
                            reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
 }
 
-const DexFile::ClassDef* OatDexFile::FindClassDef(const DexFile& dex_file,
-                                                  const char* descriptor,
-                                                  size_t hash) {
+ArrayRef<const uint8_t> OatDexFile::GetQuickenedInfoOf(const DexFile& dex_file,
+                                                       uint32_t dex_method_idx) const {
+  const OatFile* oat_file = GetOatFile();
+  if (oat_file == nullptr) {
+    return ArrayRef<const uint8_t>();
+  } else  {
+    return oat_file->GetVdexFile()->GetQuickenedInfoOf(dex_file, dex_method_idx);
+  }
+}
+
+const dex::ClassDef* OatDexFile::FindClassDef(const DexFile& dex_file,
+                                              const char* descriptor,
+                                              size_t hash) {
   const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
   DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
   bool used_lookup_table = false;
-  const DexFile::ClassDef* lookup_table_classdef = nullptr;
+  const dex::ClassDef* lookup_table_classdef = nullptr;
   if (LIKELY((oat_dex_file != nullptr) && oat_dex_file->GetTypeLookupTable().Valid())) {
     used_lookup_table = true;
     const uint32_t class_def_idx = oat_dex_file->GetTypeLookupTable().Lookup(descriptor, hash);
@@ -1874,10 +1869,10 @@
     DCHECK(!used_lookup_table);
     return nullptr;
   }
-  const DexFile::TypeId* type_id = dex_file.FindTypeId(descriptor);
+  const dex::TypeId* type_id = dex_file.FindTypeId(descriptor);
   if (type_id != nullptr) {
     dex::TypeIndex type_idx = dex_file.GetIndexForTypeId(*type_id);
-    const DexFile::ClassDef* found_class_def = dex_file.FindClassDef(type_idx);
+    const dex::ClassDef* found_class_def = dex_file.FindClassDef(type_idx);
     if (kIsDebugBuild && used_lookup_table) {
       DCHECK_EQ(found_class_def, lookup_table_classdef);
     }
@@ -1940,7 +1935,7 @@
       }
       case kOatClassMax: {
         LOG(FATAL) << "Invalid OatClassType " << type_;
-        break;
+        UNREACHABLE();
       }
     }
 }
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b3736e6..37dbe6a 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -19,18 +19,16 @@
 
 #include <list>
 #include <string>
+#include <string_view>
 #include <vector>
 
 #include "base/array_ref.h"
 #include "base/mutex.h"
 #include "base/os.h"
 #include "base/safe_map.h"
-#include "base/stringpiece.h"
 #include "base/tracking_safe_map.h"
-#include "base/utils.h"
 #include "class_status.h"
 #include "compiler_filter.h"
-#include "dex/dex_file.h"
 #include "dex/dex_file_layout.h"
 #include "dex/type_lookup_table.h"
 #include "dex/utf.h"
@@ -41,6 +39,7 @@
 namespace art {
 
 class BitVector;
+class DexFile;
 class ElfFile;
 class DexLayoutSections;
 template <class MirrorType> class GcRoot;
@@ -51,6 +50,10 @@
 class OatQuickMethodHeader;
 class VdexFile;
 
+namespace dex {
+struct ClassDef;
+}  // namespace dex
+
 namespace gc {
 namespace collector {
 class DummyOatFile;
@@ -85,7 +88,6 @@
   static OatFile* Open(int zip_fd,
                        const std::string& filename,
                        const std::string& location,
-                       uint8_t* requested_base,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
@@ -99,7 +101,6 @@
                        int vdex_fd,
                        int oat_fd,
                        const std::string& oat_location,
-                       uint8_t* requested_base,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
@@ -228,12 +229,12 @@
     // A representation of an invalid OatClass, used when an OatClass can't be found.
     // See FindOatClass().
     static OatClass Invalid() {
-      return OatClass(/* oat_file */ nullptr,
+      return OatClass(/* oat_file= */ nullptr,
                       ClassStatus::kErrorUnresolved,
                       kOatClassNoneCompiled,
-                      /* bitmap_size */ 0,
-                      /* bitmap_pointer */ nullptr,
-                      /* methods_pointer */ nullptr);
+                      /* bitmap_size= */ 0,
+                      /* bitmap_pointer= */ nullptr,
+                      /* methods_pointer= */ nullptr);
     }
 
    private:
@@ -397,12 +398,13 @@
   // Owning storage for the OatDexFile objects.
   std::vector<const OatDexFile*> oat_dex_files_storage_;
 
-  // NOTE: We use a StringPiece as the key type to avoid a memory allocation on every
-  // lookup with a const char* key. The StringPiece doesn't own its backing storage,
+  // NOTE: We use a std::string_view as the key type to avoid a memory allocation on every
+  // lookup with a const char* key. The std::string_view doesn't own its backing storage,
   // therefore we're using the OatDexFile::dex_file_location_ as the backing storage
   // for keys in oat_dex_files_ and the string_cache_ entries for the backing storage
   // of keys in secondary_oat_dex_files_ and oat_dex_files_by_canonical_location_.
-  typedef AllocationTrackingSafeMap<StringPiece, const OatDexFile*, kAllocatorTagOatFile> Table;
+  using Table =
+      AllocationTrackingSafeMap<std::string_view, const OatDexFile*, kAllocatorTagOatFile>;
 
   // Map each location and canonical location (if different) retrieved from the
   // oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
@@ -421,7 +423,7 @@
 
   // Cache of strings. Contains the backing storage for keys in the secondary_oat_dex_files_
   // and the lazily initialized oat_dex_files_by_canonical_location_.
-  // NOTE: We're keeping references to contained strings in form of StringPiece and adding
+  // NOTE: We're keeping references to contained strings in form of std::string_view and adding
   // new strings to the end. The adding of a new element must not touch any previously stored
   // elements. std::list<> and std::deque<> satisfy this requirement, std::vector<> doesn't.
   mutable std::list<std::string> string_cache_ GUARDED_BY(secondary_lookup_lock_);
@@ -501,11 +503,14 @@
     return dex_file_pointer_;
   }
 
+  ArrayRef<const uint8_t> GetQuickenedInfoOf(const DexFile& dex_file,
+                                             uint32_t dex_method_idx) const;
+
   // Looks up a class definition by its class descriptor. Hash must be
   // ComputeModifiedUtf8Hash(descriptor).
-  static const DexFile::ClassDef* FindClassDef(const DexFile& dex_file,
-                                               const char* descriptor,
-                                               size_t hash);
+  static const dex::ClassDef* FindClassDef(const DexFile& dex_file,
+                                           const char* descriptor,
+                                           size_t hash);
 
   // Madvise the dex file based on the state we are moving to.
   static void MadviseDexFile(const DexFile& dex_file, MadviseState state);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 009abdb..80ac01f 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -76,9 +76,9 @@
                        isa,
                        load_executable,
                        only_load_system_executable,
-                       -1 /* vdex_fd */,
-                       -1 /* oat_fd */,
-                       -1 /* zip_fd */) {}
+                       /*vdex_fd=*/ -1,
+                       /*oat_fd=*/ -1,
+                       /*zip_fd=*/ -1) {}
 
 
 OatFileAssistant::OatFileAssistant(const char* dex_location,
@@ -91,8 +91,8 @@
     : isa_(isa),
       load_executable_(load_executable),
       only_load_system_executable_(only_load_system_executable),
-      odex_(this, /*is_oat_location*/ false),
-      oat_(this, /*is_oat_location*/ true),
+      odex_(this, /*is_oat_location=*/ false),
+      oat_(this, /*is_oat_location=*/ true),
       zip_fd_(zip_fd) {
   CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
 
@@ -124,7 +124,7 @@
     // Get the oat filename.
     std::string oat_file_name;
     if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
-      oat_.Reset(oat_file_name, false /* use_fd */);
+      oat_.Reset(oat_file_name, /*use_fd=*/ false);
     } else {
       LOG(WARNING) << "Failed to determine oat file name for dex location "
                    << dex_location_ << ": " << error_msg;
@@ -419,7 +419,7 @@
       // starts up.
       LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
         << "Allow oat file use. This is potentially dangerous.";
-    } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
+    } else if (!image_info->ValidateBootClassPathChecksums(file)) {
       VLOG(oat) << "Oat image checksum does not match image checksum.";
       return kOatBootImageOutOfDate;
     }
@@ -560,6 +560,22 @@
   return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
 }
 
+bool OatFileAssistant::ImageInfo::ValidateBootClassPathChecksums(const OatFile& oat_file) const {
+  const char* oat_boot_class_path_checksums =
+      oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+  if (oat_boot_class_path_checksums == nullptr) {
+    return false;
+  }
+  // The checksums can be either the same or a prefix of the expected checksums,
+  // ending before the ':' delimiter.
+  size_t length = strlen(oat_boot_class_path_checksums);
+  if (length > boot_class_path_checksums.length() ||
+      (length < boot_class_path_checksums.length() && boot_class_path_checksums[length] != ':')) {
+    return false;
+  }
+  return boot_class_path_checksums.compare(0u, length, oat_boot_class_path_checksums) == 0;
+}
+
 std::unique_ptr<OatFileAssistant::ImageInfo>
 OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
   CHECK(error_msg != nullptr);
@@ -567,16 +583,11 @@
   Runtime* runtime = Runtime::Current();
   std::unique_ptr<ImageInfo> info(new ImageInfo());
   info->location = runtime->GetImageLocation();
-
-  std::unique_ptr<ImageHeader> image_header(
-      gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
-  if (image_header == nullptr) {
+  info->boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+      runtime->GetBootClassPath(), info->location, isa, error_msg);
+  if (info->boot_class_path_checksums.empty()) {
     return nullptr;
   }
-
-  info->oat_checksum = image_header->GetOatChecksum();
-  info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
-  info->patch_delta = image_header->GetPatchDelta();
   return info;
 }
 
@@ -693,17 +704,17 @@
             vdex = VdexFile::Open(vdex_fd_,
                                   s.st_size,
                                   vdex_filename,
-                                  false /*writable*/,
-                                  false /*low_4gb*/,
-                                  false /* unquicken */,
+                                  /*writable=*/ false,
+                                  /*low_4gb=*/ false,
+                                  /*unquicken=*/ false,
                                   &error_msg);
           }
         }
       } else {
         vdex = VdexFile::Open(vdex_filename,
-                              false /*writeable*/,
-                              false /*low_4gb*/,
-                              false /*unquicken*/,
+                              /*writable=*/ false,
+                              /*low_4gb=*/ false,
+                              /*unquicken=*/ false,
                               &error_msg);
       }
       if (vdex == nullptr) {
@@ -779,22 +790,20 @@
                                     vdex_fd_,
                                     oat_fd_,
                                     filename_.c_str(),
-                                    /* requested_base */ nullptr,
                                     executable,
-                                    /* low_4gb */ false,
+                                    /*low_4gb=*/ false,
                                     oat_file_assistant_->dex_location_.c_str(),
-                                    /* reservation */ nullptr,
+                                    /*reservation=*/ nullptr,
                                     &error_msg));
         }
       } else {
-        file_.reset(OatFile::Open(/* zip_fd */ -1,
+        file_.reset(OatFile::Open(/*zip_fd=*/ -1,
                                   filename_.c_str(),
                                   filename_.c_str(),
-                                  /* requested_base */ nullptr,
                                   executable,
-                                  /* low_4gb */ false,
+                                  /*low_4gb=*/ false,
                                   oat_file_assistant_->dex_location_.c_str(),
-                                  /* reservation */ nullptr,
+                                  /*reservation=*/ nullptr,
                                   &error_msg));
       }
       if (file_.get() == nullptr) {
@@ -924,7 +933,7 @@
     std::string* out_compilation_reason) {
   // It may not be possible to load an oat file executable (e.g., selinux restrictions). Load
   // non-executable and check the status manually.
-  OatFileAssistant oat_file_assistant(filename.c_str(), isa, false /* load_executable */);
+  OatFileAssistant oat_file_assistant(filename.c_str(), isa, /*load_executable=*/ false);
   std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
 
   if (oat_file == nullptr) {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3da1a22..def55b8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -246,10 +246,10 @@
 
  private:
   struct ImageInfo {
-    uint32_t oat_checksum = 0;
-    uintptr_t oat_data_begin = 0;
-    int32_t patch_delta = 0;
+    bool ValidateBootClassPathChecksums(const OatFile& oat_file) const;
+
     std::string location;
+    std::string boot_class_path_checksums;
 
     static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
                                                           std::string* error_msg);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index efdefb1..a99bd51 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -61,6 +61,14 @@
       VerifyOptimizationStatus(
           file, CompilerFilter::NameOfFilter(expected_filter), expected_reason);
   }
+  void InsertNewBootClasspathEntry() {
+    std::string extra_dex_filename = GetMultiDexSrc1();
+    Runtime* runtime = Runtime::Current();
+    runtime->boot_class_path_.push_back(extra_dex_filename);
+    if (!runtime->boot_class_path_locations_.empty()) {
+      runtime->boot_class_path_locations_.push_back(extra_dex_filename);
+    }
+  }
 };
 
 class ScopedNonWritable {
@@ -182,8 +190,8 @@
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
             oat_file_assistant.GetDexOptNeeded(
                 CompilerFilter::kDefaultCompilerFilter,
-                /* downgrade */ false,
-                /* profile_changed */ false,
+                /* profile_changed= */ false,
+                /* downgrade= */ false,
                 relative_context.get()));
 }
 
@@ -236,17 +244,50 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
 
-  // For the use of oat location by making the dex parent not writable.
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+  // Force the use of oat location by making the dex parent not writable.
+  OatFileAssistant oat_file_assistant(
+      dex_location.c_str(), kRuntimeISA, /*load_executable=*/ false);
 
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+
+  EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+  EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
+  EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+  EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
+
+  VerifyOptimizationStatus(dex_location, CompilerFilter::kSpeed, "install");
+}
+
+// Case: We have an ODEX file compiled against partial boot image.
+// Expect: The status is kNoDexOptNeeded.
+TEST_F(OatFileAssistantTest, OdexUpToDatePartialBootImage) {
+  std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
+  std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
+  Copy(GetDexSrc1(), dex_location);
+  GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+
+  // Insert an extra dex file to the boot class path.
+  InsertNewBootClasspathEntry();
+
+  // Force the use of oat location by making the dex parent not writable.
+  OatFileAssistant oat_file_assistant(
+      dex_location.c_str(), kRuntimeISA, /*load_executable=*/ false);
+
+  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+  EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
+            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -302,7 +343,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
 
-  // For the use of oat location by making the dex parent not writable.
+  // Force the use of oat location by making the dex parent not writable.
   ScopedNonWritable scoped_non_writable(dex_location);
   ASSERT_TRUE(scoped_non_writable.IsSuccessful());
 
@@ -336,11 +377,11 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
-  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
@@ -375,17 +416,17 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
                                       vdex_fd.get(),
-                                      -1 /* oat_fd */,
+                                      /* oat_fd= */ -1,
                                       zip_fd.get());
   EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -408,16 +449,16 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
-                                      -1 /* vdex_fd */,
+                                      /* vdex_fd= */ -1,
                                       odex_fd.get(),
                                       zip_fd.get());
 
@@ -436,13 +477,13 @@
 
   Copy(GetDexSrc1(), dex_location);
 
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
-                                      -1 /* vdex_fd */,
-                                      -1 /* oat_fd */,
+                                      /* vdex_fd= */ -1,
+                                      /* oat_fd= */ -1,
                                       zip_fd);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -637,7 +678,7 @@
   // Strip the dex file.
   Copy(GetStrippedDexSrc1(), dex_location);
 
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable*/false);
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable=*/false);
 
   // Because the dex file is stripped, the odex file is considered the source
   // of truth for the dex checksums. The oat file should be considered
@@ -730,7 +771,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ true);
+                     /* with_alternate_image= */ true);
 
   ScopedNonWritable scoped_non_writable(dex_location);
   ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -765,7 +806,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kExtract,
-                     /* with_alternate_image */ true);
+                     /* with_alternate_image= */ true);
 
   ScopedNonWritable scoped_non_writable(dex_location);
   ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -1148,7 +1189,7 @@
         loaded_oat_file_(nullptr)
   {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override {
     // Load the dex files, and save a pointer to the loaded oat file, so that
     // we can verify only one oat file was loaded for the dex location.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
@@ -1167,7 +1208,7 @@
     dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
         dex_location_.c_str(),
         Runtime::Current()->GetSystemClassLoader(),
-        /*dex_elements*/nullptr,
+        /*dex_elements=*/nullptr,
         &oat_file,
         &error_msgs);
     CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
@@ -1213,7 +1254,7 @@
     tasks.push_back(std::move(task));
   }
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
+  thread_pool.Wait(self, /* do_work= */ true, /* may_hold_locks= */ false);
 
   // Verify that tasks which got an oat file got a unique one.
   std::set<const OatFile*> oat_files;
@@ -1335,8 +1376,8 @@
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
             oat_file_assistant.GetDexOptNeeded(
                   CompilerFilter::kDefaultCompilerFilter,
-                  /* downgrade */ false,
-                  /* profile_changed */ false,
+                  /* profile_changed= */ false,
+                  /* downgrade= */ false,
                   updated_context.get()));
 }
 
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 1f0b265..5aa1ea2 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -27,6 +27,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "class_linker.h"
@@ -84,7 +85,7 @@
   auto it = oat_files_.find(compare);
   CHECK(it != oat_files_.end());
   oat_files_.erase(it);
-  compare.release();
+  compare.release();  // NOLINT b/117926937
 }
 
 const OatFile* OatFileManager::FindOpenedOatFileFromDexLocation(
@@ -118,9 +119,10 @@
 }
 
 std::vector<const OatFile*> OatFileManager::GetBootOatFiles() const {
-  std::vector<const OatFile*> oat_files;
   std::vector<gc::space::ImageSpace*> image_spaces =
       Runtime::Current()->GetHeap()->GetBootImageSpaces();
+  std::vector<const OatFile*> oat_files;
+  oat_files.reserve(image_spaces.size());
   for (gc::space::ImageSpace* image_space : image_spaces) {
     oat_files.push_back(image_space->GetOatFile());
   }
@@ -151,8 +153,9 @@
 }
 
 std::vector<const OatFile*> OatFileManager::RegisterImageOatFiles(
-    std::vector<gc::space::ImageSpace*> spaces) {
+    const std::vector<gc::space::ImageSpace*>& spaces) {
   std::vector<const OatFile*> oat_files;
+  oat_files.reserve(spaces.size());
   for (gc::space::ImageSpace* space : spaces) {
     oat_files.push_back(RegisterOatFile(space->ReleaseOatFile()));
   }
@@ -181,9 +184,9 @@
 
  private:
   static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
-    BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+    BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator());
     for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
-      const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+      const dex::ClassDef& class_def = dex_file->GetClassDef(i);
       uint16_t type_idx = class_def.class_idx_.index_;
       type_indexes.SetBit(type_idx);
     }
@@ -290,10 +293,12 @@
 
   // Generate type index information for each dex file.
   std::vector<TypeIndexInfo> loaded_types;
+  loaded_types.reserve(dex_files_loaded.size());
   for (const DexFile* dex_file : dex_files_loaded) {
     loaded_types.push_back(TypeIndexInfo(dex_file));
   }
   std::vector<TypeIndexInfo> unloaded_types;
+  unloaded_types.reserve(dex_files_unloaded.size());
   for (const DexFile* dex_file : dex_files_unloaded) {
     unloaded_types.push_back(TypeIndexInfo(dex_file));
   }
@@ -302,12 +307,12 @@
   std::priority_queue<DexFileAndClassPair> queue;
   for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
     if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
-      queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+      queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat=*/true);
     }
   }
   for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
     if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
-      queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+      queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat=*/false);
     }
   }
 
@@ -385,8 +390,8 @@
   // the oat file without addition checks
   ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch(
       oat_file->GetClassLoaderContext(),
-      /*verify_names*/ true,
-      /*verify_checksums*/ true);
+      /*verify_names=*/ true,
+      /*verify_checksums=*/ true);
   switch (result) {
     case ClassLoaderContext::VerificationResult::kForcedToSkipChecks:
       return CheckCollisionResult::kSkippedClassLoaderContextSharedLibrary;
@@ -526,6 +531,8 @@
   if (source_oat_file != nullptr) {
     bool added_image_space = false;
     if (source_oat_file->IsExecutable()) {
+      ScopedTrace app_image_timing("AppImage:Loading");
+
       // We need to throw away the image space if we are debuggable but the oat-file source of the
       // image is not otherwise we might get classes with inlined methods or other such things.
       std::unique_ptr<gc::space::ImageSpace> image_space;
@@ -565,7 +572,7 @@
           if (added_image_space) {
             // Successfully added image space to heap, release the map so that it does not get
             // freed.
-            image_space.release();
+            image_space.release();  // NOLINT b/117926937
 
             // Register for tracking.
             for (const auto& dex_file : dex_files) {
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index 4132b25..99e1b73 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -23,8 +23,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "jni.h"
 
 namespace art {
@@ -73,7 +73,8 @@
 
   // Returns the oat files for the images, registers the oat files.
   // Takes ownership of the imagespace's underlying oat files.
-  std::vector<const OatFile*> RegisterImageOatFiles(std::vector<gc::space::ImageSpace*> spaces)
+  std::vector<const OatFile*> RegisterImageOatFiles(
+      const std::vector<gc::space::ImageSpace*>& spaces)
       REQUIRES(!Locks::oat_file_manager_lock_);
 
   // Finds or creates the oat file holding dex_location. Then loads and returns
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 51d8fca..ce09da4d 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -35,10 +35,6 @@
       OatFile::ResolveRelativeEncodedDexLocation(
         nullptr, "/data/app/foo/base.apk"));
 
-  EXPECT_EQ(std::string("/system/framework/base.apk"),
-      OatFile::ResolveRelativeEncodedDexLocation(
-        "/data/app/foo/base.apk", "/system/framework/base.apk"));
-
   EXPECT_EQ(std::string("/data/app/foo/base.apk"),
       OatFile::ResolveRelativeEncodedDexLocation(
         "/data/app/foo/base.apk", "base.apk"));
@@ -55,13 +51,29 @@
       OatFile::ResolveRelativeEncodedDexLocation(
         "/data/app/foo/base.apk", "base.apk!classes11.dex"));
 
-  EXPECT_EQ(std::string("base.apk"),
-      OatFile::ResolveRelativeEncodedDexLocation(
-        "/data/app/foo/sludge.apk", "base.apk"));
-
-  EXPECT_EQ(std::string("o/base.apk"),
-      OatFile::ResolveRelativeEncodedDexLocation(
-        "/data/app/foo/base.apk", "o/base.apk"));
+  // Host and target differ in their way of handling locations
+  // that are a prefix of one another, due to boot image files.
+  if (kIsTargetBuild) {
+    EXPECT_EQ(std::string("/system/framework/base.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/base.apk", "/system/framework/base.apk"));
+    EXPECT_EQ(std::string("base.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/sludge.apk", "base.apk"));
+    EXPECT_EQ(std::string("o/base.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/base.apk", "o/base.apk"));
+  } else {
+    EXPECT_EQ(std::string("/data/app/foo/base.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/base.apk", "/system/framework/base.apk"));
+    EXPECT_EQ(std::string("/data/app/foo/sludge.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/sludge.apk", "base.apk"));
+    EXPECT_EQ(std::string("/data/app/foo/base.apk"),
+        OatFile::ResolveRelativeEncodedDexLocation(
+          "/data/app/foo/base.apk", "o/base.apk"));
+  }
 }
 
 TEST_F(OatFileTest, LoadOat) {
@@ -74,14 +86,13 @@
   std::string error_msg;
   ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
         dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_location.c_str(),
                                                    oat_location.c_str(),
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr);
 
@@ -102,14 +113,13 @@
 
   // Ensure we can load that file. Just a precondition.
   {
-    std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+    std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                      oat_location.c_str(),
                                                      oat_location.c_str(),
-                                                     /* requested_base */ nullptr,
-                                                     /* executable */ false,
-                                                     /* low_4gb */ false,
+                                                     /*executable=*/ false,
+                                                     /*low_4gb=*/ false,
                                                      dex_location.c_str(),
-                                                     /* reservation */ nullptr,
+                                                     /*reservation=*/ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file != nullptr);
     ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
@@ -119,14 +129,13 @@
   Copy(GetTestDexFileName("MainUncompressed"), dex_location);
 
   // And try to load again.
-  std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
                                                    oat_location,
                                                    oat_location,
-                                                   /* requested_base */ nullptr,
-                                                   /* executable */ false,
-                                                   /* low_4gb */ false,
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
                                                    dex_location.c_str(),
-                                                   /* reservation */ nullptr,
+                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   EXPECT_TRUE(odex_file == nullptr);
   EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/obj_ptr-inl.h b/runtime/obj_ptr-inl.h
index f1e3b50..f096445 100644
--- a/runtime/obj_ptr-inl.h
+++ b/runtime/obj_ptr-inl.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_OBJ_PTR_INL_H_
 #define ART_RUNTIME_OBJ_PTR_INL_H_
 
+#include <ostream>
+
 #include "base/bit_utils.h"
 #include "obj_ptr.h"
 #include "thread-current-inl.h"
@@ -24,18 +26,27 @@
 namespace art {
 
 template<class MirrorType>
+inline uintptr_t ObjPtr<MirrorType>::GetCurrentTrimedCookie() {
+  Thread* self = Thread::Current();
+  if (UNLIKELY(self == nullptr)) {
+    return kCookieMask;
+  }
+  return self->GetPoisonObjectCookie() & kCookieMask;
+}
+
+template<class MirrorType>
 inline bool ObjPtr<MirrorType>::IsValid() const {
   if (!kObjPtrPoisoning || IsNull()) {
     return true;
   }
-  return GetCookie() == TrimCookie(Thread::Current()->GetPoisonObjectCookie());
+  return GetCookie() == GetCurrentTrimedCookie();
 }
 
 template<class MirrorType>
 inline void ObjPtr<MirrorType>::AssertValid() const {
   if (kObjPtrPoisoning) {
     CHECK(IsValid()) << "Stale object pointer " << PtrUnchecked() << " , expected cookie "
-        << TrimCookie(Thread::Current()->GetPoisonObjectCookie()) << " but got " << GetCookie();
+        << GetCurrentTrimedCookie() << " but got " << GetCookie();
   }
 }
 
@@ -47,9 +58,7 @@
     DCHECK_LE(ref, 0xFFFFFFFFU);
     ref >>= kObjectAlignmentShift;
     // Put cookie in high bits.
-    Thread* self = Thread::Current();
-    DCHECK(self != nullptr);
-    ref |= self->GetPoisonObjectCookie() << kCookieShift;
+    ref |= GetCurrentTrimedCookie() << kCookieShift;
   }
   return ref;
 }
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index e421d87..b0f24da 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -17,12 +17,15 @@
 #ifndef ART_RUNTIME_OBJ_PTR_H_
 #define ART_RUNTIME_OBJ_PTR_H_
 
-#include <ostream>
+#include <iosfwd>
 #include <type_traits>
 
-#include "base/globals.h"
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "runtime_globals.h"
+
+// Always inline ObjPtr methods even in debug builds.
+#define OBJPTR_INLINE __attribute__ ((always_inline))
 
 namespace art {
 
@@ -45,26 +48,26 @@
                 "must have a least kObjectAlignmentShift bits");
 
  public:
-  ALWAYS_INLINE ObjPtr() REQUIRES_SHARED(Locks::mutator_lock_) : reference_(0u) {}
+  OBJPTR_INLINE ObjPtr() REQUIRES_SHARED(Locks::mutator_lock_) : reference_(0u) {}
 
   // Note: The following constructors allow implicit conversion. This simplifies code that uses
   //       them, e.g., for parameter passing. However, in general, implicit-conversion constructors
   //       are discouraged and detected by clang-tidy.
 
-  ALWAYS_INLINE ObjPtr(std::nullptr_t)
+  OBJPTR_INLINE ObjPtr(std::nullptr_t)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(0u) {}
 
   template <typename Type,
             typename = typename std::enable_if<std::is_base_of<MirrorType, Type>::value>::type>
-  ALWAYS_INLINE ObjPtr(Type* ptr)
+  OBJPTR_INLINE ObjPtr(Type* ptr)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(Encode(static_cast<MirrorType*>(ptr))) {
   }
 
   template <typename Type,
             typename = typename std::enable_if<std::is_base_of<MirrorType, Type>::value>::type>
-  ALWAYS_INLINE ObjPtr(const ObjPtr<Type>& other)
+  OBJPTR_INLINE ObjPtr(const ObjPtr<Type>& other)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(kObjPtrPoisoningValidateOnCopy
                        ? Encode(static_cast<MirrorType*>(other.Ptr()))
@@ -73,7 +76,7 @@
 
   template <typename Type,
             typename = typename std::enable_if<std::is_base_of<MirrorType, Type>::value>::type>
-  ALWAYS_INLINE ObjPtr& operator=(const ObjPtr<Type>& other)
+  OBJPTR_INLINE ObjPtr& operator=(const ObjPtr<Type>& other)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     reference_ = kObjPtrPoisoningValidateOnCopy
                      ? Encode(static_cast<MirrorType*>(other.Ptr()))
@@ -81,63 +84,63 @@
     return *this;
   }
 
-  ALWAYS_INLINE ObjPtr& operator=(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE ObjPtr& operator=(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
     Assign(ptr);
     return *this;
   }
 
-  ALWAYS_INLINE void Assign(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE void Assign(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
     reference_ = Encode(ptr);
   }
 
-  ALWAYS_INLINE MirrorType* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE MirrorType* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return Ptr();
   }
 
-  ALWAYS_INLINE bool IsNull() const {
+  OBJPTR_INLINE bool IsNull() const {
     return reference_ == 0;
   }
 
   // Ptr makes sure that the object pointer is valid.
-  ALWAYS_INLINE MirrorType* Ptr() const REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE MirrorType* Ptr() const REQUIRES_SHARED(Locks::mutator_lock_) {
     AssertValid();
     return PtrUnchecked();
   }
 
-  ALWAYS_INLINE bool IsValid() const REQUIRES_SHARED(Locks::mutator_lock_);
+  OBJPTR_INLINE bool IsValid() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void AssertValid() const REQUIRES_SHARED(Locks::mutator_lock_);
+  OBJPTR_INLINE void AssertValid() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE bool operator==(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE bool operator==(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     return Ptr() == ptr.Ptr();
   }
 
   template <typename PointerType>
-  ALWAYS_INLINE bool operator==(const PointerType* ptr) const
+  OBJPTR_INLINE bool operator==(const PointerType* ptr) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return Ptr() == ptr;
   }
 
-  ALWAYS_INLINE bool operator==(std::nullptr_t) const {
+  OBJPTR_INLINE bool operator==(std::nullptr_t) const {
     return IsNull();
   }
 
-  ALWAYS_INLINE bool operator!=(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+  OBJPTR_INLINE bool operator!=(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     return Ptr() != ptr.Ptr();
   }
 
   template <typename PointerType>
-  ALWAYS_INLINE bool operator!=(const PointerType* ptr) const
+  OBJPTR_INLINE bool operator!=(const PointerType* ptr) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return Ptr() != ptr;
   }
 
-  ALWAYS_INLINE bool operator!=(std::nullptr_t) const {
+  OBJPTR_INLINE bool operator!=(std::nullptr_t) const {
     return !IsNull();
   }
 
   // Ptr unchecked does not check that object pointer is valid. Do not use if you can avoid it.
-  ALWAYS_INLINE MirrorType* PtrUnchecked() const {
+  OBJPTR_INLINE MirrorType* PtrUnchecked() const {
     if (kObjPtrPoisoning) {
       return reinterpret_cast<MirrorType*>(
           static_cast<uintptr_t>(static_cast<uint32_t>(reference_ << kObjectAlignmentShift)));
@@ -156,15 +159,13 @@
 
  private:
   // Trim off high bits of thread local cookie.
-  ALWAYS_INLINE static uintptr_t TrimCookie(uintptr_t cookie) {
-    return cookie & kCookieMask;
-  }
+  OBJPTR_INLINE static uintptr_t GetCurrentTrimedCookie();
 
-  ALWAYS_INLINE uintptr_t GetCookie() const {
+  OBJPTR_INLINE uintptr_t GetCookie() const {
     return reference_ >> kCookieShift;
   }
 
-  ALWAYS_INLINE static uintptr_t Encode(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_);
+  OBJPTR_INLINE static uintptr_t Encode(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_);
   // The encoded reference and cookie.
   uintptr_t reference_;
 
@@ -184,24 +185,24 @@
 };
 
 template<class MirrorType, typename PointerType>
-ALWAYS_INLINE bool operator==(const PointerType* a, const ObjPtr<MirrorType>& b)
+OBJPTR_INLINE bool operator==(const PointerType* a, const ObjPtr<MirrorType>& b)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   return b == a;
 }
 
 template<class MirrorType>
-ALWAYS_INLINE bool operator==(std::nullptr_t, const ObjPtr<MirrorType>& b) {
+OBJPTR_INLINE bool operator==(std::nullptr_t, const ObjPtr<MirrorType>& b) {
   return b == nullptr;
 }
 
 template<typename MirrorType, typename PointerType>
-ALWAYS_INLINE bool operator!=(const PointerType* a, const ObjPtr<MirrorType>& b)
+OBJPTR_INLINE bool operator!=(const PointerType* a, const ObjPtr<MirrorType>& b)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   return b != a;
 }
 
 template<class MirrorType>
-ALWAYS_INLINE bool operator!=(std::nullptr_t, const ObjPtr<MirrorType>& b) {
+OBJPTR_INLINE bool operator!=(std::nullptr_t, const ObjPtr<MirrorType>& b) {
   return b != nullptr;
 }
 
@@ -216,7 +217,7 @@
 }
 
 template<class MirrorType>
-ALWAYS_INLINE std::ostream& operator<<(std::ostream& os, ObjPtr<MirrorType> ptr);
+OBJPTR_INLINE std::ostream& operator<<(std::ostream& os, ObjPtr<MirrorType> ptr);
 
 }  // namespace art
 
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 5916f90..15b763a 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_OBJECT_LOCK_H_
 #define ART_RUNTIME_OBJECT_LOCK_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "handle.h"
 
 namespace art {
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 4df9b27..6d1a8e0 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -17,24 +17,24 @@
 #ifndef ART_RUNTIME_OFFSETS_H_
 #define ART_RUNTIME_OFFSETS_H_
 
-#include <ostream>
+#include <iosfwd>
 
 #include "base/enums.h"
-#include "base/globals.h"
+#include "runtime_globals.h"
 
 namespace art {
 
 // Allow the meaning of offsets to be strongly typed.
 class Offset {
  public:
-  explicit Offset(size_t val) : val_(val) {}
-  int32_t Int32Value() const {
+  constexpr explicit Offset(size_t val) : val_(val) {}
+  constexpr int32_t Int32Value() const {
     return static_cast<int32_t>(val_);
   }
-  uint32_t Uint32Value() const {
+  constexpr uint32_t Uint32Value() const {
     return static_cast<uint32_t>(val_);
   }
-  size_t SizeValue() const {
+  constexpr size_t SizeValue() const {
     return val_;
   }
 
@@ -46,7 +46,7 @@
 // Offsets relative to the current frame.
 class FrameOffset : public Offset {
  public:
-  explicit FrameOffset(size_t val) : Offset(val) {}
+  constexpr explicit FrameOffset(size_t val) : Offset(val) {}
   bool operator>(FrameOffset other) const { return val_ > other.val_; }
   bool operator<(FrameOffset other) const { return val_ < other.val_; }
 };
@@ -55,7 +55,7 @@
 template<PointerSize pointer_size>
 class ThreadOffset : public Offset {
  public:
-  explicit ThreadOffset(size_t val) : Offset(val) {}
+  constexpr explicit ThreadOffset(size_t val) : Offset(val) {}
 };
 
 using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
@@ -64,7 +64,7 @@
 // Offsets relative to an object.
 class MemberOffset : public Offset {
  public:
-  explicit MemberOffset(size_t val) : Offset(val) {}
+  constexpr explicit MemberOffset(size_t val) : Offset(val) {}
 };
 
 }  // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4d16eb5..6fd691f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -16,9 +16,11 @@
 
 #include "parsed_options.h"
 
+#include <memory>
 #include <sstream>
 
 #include <android-base/logging.h>
+#include <android-base/strings.h>
 
 #include "base/file_utils.h"
 #include "base/macros.h"
@@ -67,7 +69,7 @@
   using M = RuntimeArgumentMap;
 
   std::unique_ptr<RuntimeParser::Builder> parser_builder =
-      std::unique_ptr<RuntimeParser::Builder>(new RuntimeParser::Builder());
+      std::make_unique<RuntimeParser::Builder>();
 
   parser_builder->
        Define("-Xzygote")
@@ -77,7 +79,7 @@
       .Define("-showversion")
           .IntoKey(M::ShowVersion)
       .Define("-Xbootclasspath:_")
-          .WithType<std::string>()
+          .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
           .IntoKey(M::BootClassPath)
       .Define("-Xbootclasspath-locations:_")
           .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
@@ -149,6 +151,10 @@
           .IntoKey(M::LongGCLogThreshold)
       .Define("-XX:DumpGCPerformanceOnShutdown")
           .IntoKey(M::DumpGCPerformanceOnShutdown)
+      .Define("-XX:DumpRegionInfoBeforeGC")
+          .IntoKey(M::DumpRegionInfoBeforeGC)
+      .Define("-XX:DumpRegionInfoAfterGC")
+          .IntoKey(M::DumpRegionInfoAfterGC)
       .Define("-XX:DumpJITInfoOnShutdown")
           .IntoKey(M::DumpJITInfoOnShutdown)
       .Define("-XX:IgnoreMaxFootprint")
@@ -322,7 +328,7 @@
           .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(M::SlowDebug)
       .Define("-Xtarget-sdk-version:_")
-          .WithType<int>()
+          .WithType<unsigned int>()
           .IntoKey(M::TargetSdkVersion)
       .Define("-Xhidden-api-checks")
           .IntoKey(M::HiddenApiChecks)
@@ -346,7 +352,7 @@
 
   // TODO: Move Usage information into this DSL.
 
-  return std::unique_ptr<RuntimeParser>(new RuntimeParser(parser_builder->Build()));
+  return std::make_unique<RuntimeParser>(parser_builder->Build());
 }
 
 #pragma GCC diagnostic pop
@@ -512,7 +518,7 @@
                  GetInstructionSetString(kRuntimeISA));
     Exit(0);
   } else if (args.Exists(M::BootClassPath)) {
-    LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
+    LOG(INFO) << "setting boot class path to " << args.Get(M::BootClassPath)->Join();
   }
 
   if (args.GetOrDefault(M::Interpret)) {
@@ -524,8 +530,9 @@
   }
 
   // Set a default boot class path if we didn't get an explicit one via command line.
-  if (getenv("BOOTCLASSPATH") != nullptr) {
-    args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH")));
+  const char* env_bcp = getenv("BOOTCLASSPATH");
+  if (env_bcp != nullptr) {
+    args.SetIfMissing(M::BootClassPath, ParseStringList<':'>::Split(env_bcp));
   }
 
   // Set a default class path if we didn't get an explicit one via command line.
@@ -585,41 +592,20 @@
     args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
   }
 
-  // If a reference to the dalvik core.jar snuck in, replace it with
-  // the art specific version. This can happen with on device
-  // boot.art/boot.oat generation by GenerateImage which relies on the
-  // value of BOOTCLASSPATH.
-#if defined(ART_TARGET)
-  std::string core_jar("/core.jar");
-  std::string core_libart_jar("/core-libart.jar");
-#else
-  // The host uses hostdex files.
-  std::string core_jar("/core-hostdex.jar");
-  std::string core_libart_jar("/core-libart-hostdex.jar");
-#endif
-  auto boot_class_path_string = args.GetOrDefault(M::BootClassPath);
-
-  size_t core_jar_pos = boot_class_path_string.find(core_jar);
-  if (core_jar_pos != std::string::npos) {
-    boot_class_path_string.replace(core_jar_pos, core_jar.size(), core_libart_jar);
-    args.Set(M::BootClassPath, boot_class_path_string);
-  }
-
-  {
-    auto&& boot_class_path = args.GetOrDefault(M::BootClassPath);
-    auto&& boot_class_path_locations = args.GetOrDefault(M::BootClassPathLocations);
-    if (args.Exists(M::BootClassPathLocations)) {
-      size_t boot_class_path_count = ParseStringList<':'>::Split(boot_class_path).Size();
-
-      if (boot_class_path_count != boot_class_path_locations.Size()) {
-        Usage("The number of boot class path files does not match"
-            " the number of boot class path locations given\n"
-            "  boot class path files     (%zu): %s\n"
-            "  boot class path locations (%zu): %s\n",
-            boot_class_path.size(), boot_class_path_string.c_str(),
-            boot_class_path_locations.Size(), boot_class_path_locations.Join().c_str());
-        return false;
-      }
+  const ParseStringList<':'>* boot_class_path_locations = args.Get(M::BootClassPathLocations);
+  if (boot_class_path_locations != nullptr && boot_class_path_locations->Size() != 0u) {
+    const ParseStringList<':'>* boot_class_path = args.Get(M::BootClassPath);
+    if (boot_class_path == nullptr ||
+        boot_class_path_locations->Size() != boot_class_path->Size()) {
+      Usage("The number of boot class path files does not match"
+          " the number of boot class path locations given\n"
+          "  boot class path files     (%zu): %s\n"
+          "  boot class path locations (%zu): %s\n",
+          (boot_class_path != nullptr) ? boot_class_path->Size() : 0u,
+          (boot_class_path != nullptr) ? boot_class_path->Join().c_str() : "<nil>",
+          boot_class_path_locations->Size(),
+          boot_class_path_locations->Join().c_str());
+      return false;
     }
   }
 
@@ -717,6 +703,7 @@
   UsageMessage(stream, "  -Xgc:[no]postsweepingverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
+  UsageMessage(stream, "  -Xgc:[no]generational_cc\n");
   UsageMessage(stream, "  -Ximage:filename\n");
   UsageMessage(stream, "  -Xbootclasspath-locations:bootclasspath\n"
                        "     (override the dex locations of the -Xbootclasspath files)\n");
@@ -751,7 +738,7 @@
   UsageMessage(stream, "  -Xcompiler:filename\n");
   UsageMessage(stream, "  -Xcompiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Ximage-compiler-option dex2oat-option\n");
-  UsageMessage(stream, "  -Xpatchoat:filename\n");
+  UsageMessage(stream, "  -Xpatchoat:filename (obsolete, ignored)\n");
   UsageMessage(stream, "  -Xusejit:booleanvalue\n");
   UsageMessage(stream, "  -Xjitinitialsize:N\n");
   UsageMessage(stream, "  -Xjitmaxsize:N\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 8c77d39..095d66e 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -23,10 +23,10 @@
 #include <jni.h>
 
 #include "arch/instruction_set.h"
-#include "base/globals.h"
 #include "gc/collector_type.h"
 #include "gc/space/large_object_space.h"
 // #include "jit/profile_saver_options.h"
+#include "runtime_globals.h"
 #include "runtime_options.h"
 
 namespace art {
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 9e5d9ab..77d2316 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -26,7 +26,7 @@
 class ParsedOptionsTest : public ::testing::Test {
  public:
   static void SetUpTestCase() {
-    CommonRuntimeTest::SetUpAndroidRoot();
+    CommonRuntimeTest::SetUpAndroidRootEnvVars();
   }
 };
 
@@ -40,8 +40,7 @@
   boot_class_path += "-Xbootclasspath:";
 
   bool first_dex_file = true;
-  for (const std::string &dex_file_name :
-           CommonRuntimeTest::GetLibCoreDexFileNames()) {
+  for (const std::string &dex_file_name : CommonRuntimeTest::GetLibCoreDexFileNames()) {
     if (!first_dex_file) {
       class_path += ":";
     } else {
@@ -50,6 +49,8 @@
     class_path += dex_file_name;
   }
   boot_class_path += class_path;
+  std::vector<std::string> expected_boot_class_path;
+  Split(class_path, ':', &expected_boot_class_path);
 
   RuntimeOptions options;
   options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
@@ -78,9 +79,11 @@
   using Opt = RuntimeArgumentMap;
 
 #define EXPECT_PARSED_EQ(expected, actual_key) EXPECT_EQ(expected, map.GetOrDefault(actual_key))
+#define EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected, actual_key) \
+  EXPECT_EQ(expected, static_cast<std::vector<std::string>>(map.GetOrDefault(actual_key)))
 #define EXPECT_PARSED_EXISTS(actual_key) EXPECT_TRUE(map.Exists(actual_key))
 
-  EXPECT_PARSED_EQ(class_path, Opt::BootClassPath);
+  EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected_boot_class_path, Opt::BootClassPath);
   EXPECT_PARSED_EQ(class_path, Opt::ClassPath);
   EXPECT_PARSED_EQ(std::string("boot_image"), Opt::Image);
   EXPECT_PARSED_EXISTS(Opt::CheckJni);
@@ -127,6 +130,23 @@
   EXPECT_EQ(gc::kCollectorTypeSS, xgc.collector_type_);
 }
 
+TEST_F(ParsedOptionsTest, ParsedOptionsGenerationalCC) {
+  RuntimeOptions options;
+  options.push_back(std::make_pair("-Xgc:generational_cc", nullptr));
+
+  RuntimeArgumentMap map;
+  bool parsed = ParsedOptions::Parse(options, false, &map);
+  ASSERT_TRUE(parsed);
+  ASSERT_NE(0u, map.Size());
+
+  using Opt = RuntimeArgumentMap;
+
+  EXPECT_TRUE(map.Exists(Opt::GcOption));
+
+  XGcOption xgc = map.GetOrDefault(Opt::GcOption);
+  ASSERT_TRUE(xgc.generational_cc);
+}
+
 TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) {
   using Opt = RuntimeArgumentMap;
 
diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h
index 411dc7a..23e536d 100644
--- a/runtime/proxy_test.h
+++ b/runtime/proxy_test.h
@@ -47,7 +47,7 @@
 
   // Builds the interfaces array.
   jobjectArray proxyClassInterfaces =
-      soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr);
+      soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement= */ nullptr);
   soa.Self()->AssertNoPendingException();
   for (size_t i = 0; i < interfaces.size(); ++i) {
     soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i,
@@ -62,7 +62,7 @@
   jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
       methods_count,
       soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()),
-      /* initialElement */ nullptr);
+      /* initialElement= */ nullptr);
   soa.Self()->AssertNoPendingException();
 
   jsize array_index = 0;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7b92151..3bc718b 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -20,6 +20,7 @@
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/logging.h"  // For VLOG_IS_ON.
+#include "base/systrace.h"
 #include "dex/dex_file_types.h"
 #include "dex/dex_instruction.h"
 #include "entrypoints/entrypoint_utils.h"
@@ -126,7 +127,7 @@
         exception_handler_->SetHandlerDexPc(found_dex_pc);
         exception_handler_->SetHandlerQuickFramePc(
             GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
-                method, found_dex_pc, /* is_catch_handler */ true));
+                method, found_dex_pc, /* is_for_catch_handler= */ true));
         exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
         exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
         return false;  // End stack walk.
@@ -154,46 +155,36 @@
   DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
 };
 
-// Counts instrumentation stack frame prior to catch handler or upcall.
-class InstrumentationStackVisitor : public StackVisitor {
- public:
-  InstrumentationStackVisitor(Thread* self, size_t frame_depth)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        frame_depth_(frame_depth),
-        instrumentation_frames_to_pop_(0) {
-    CHECK_NE(frame_depth_, kInvalidFrameDepth);
-  }
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    size_t current_frame_depth = GetFrameDepth();
-    if (current_frame_depth < frame_depth_) {
-      CHECK(GetMethod() != nullptr);
-      if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == GetReturnPc())) {
-        if (!IsInInlinedFrame()) {
-          // We do not count inlined frames, because we do not instrument them. The reason we
-          // include them in the stack walking is the check against `frame_depth_`, which is
-          // given to us by a visitor that visits inlined frames.
-          ++instrumentation_frames_to_pop_;
+static size_t GetInstrumentationFramesToPop(Thread* self, size_t frame_depth)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  CHECK_NE(frame_depth, kInvalidFrameDepth);
+  size_t instrumentation_frames_to_pop = 0;
+  StackVisitor::WalkStack(
+      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        size_t current_frame_depth = stack_visitor->GetFrameDepth();
+        if (current_frame_depth < frame_depth) {
+          CHECK(stack_visitor->GetMethod() != nullptr);
+          if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) ==
+                  stack_visitor->GetReturnPc())) {
+            if (!stack_visitor->IsInInlinedFrame()) {
+              // We do not count inlined frames, because we do not instrument them. The reason we
+              // include them in the stack walking is the check against `frame_depth_`, which is
+              // given to us by a visitor that visits inlined frames.
+              ++instrumentation_frames_to_pop;
+            }
+          }
+          return true;
         }
-      }
-      return true;
-    } else {
-      // We reached the frame of the catch handler or the upcall.
-      return false;
-    }
-  }
-
-  size_t GetInstrumentationFramesToPop() const {
-    return instrumentation_frames_to_pop_;
-  }
-
- private:
-  const size_t frame_depth_;
-  size_t instrumentation_frames_to_pop_;
-
-  DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
-};
+        // We reached the frame of the catch handler or the upcall.
+        return false;
+      },
+      self,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+      /* check_suspended */ true,
+      /* include_transitions */ true);
+  return instrumentation_frames_to_pop;
+}
 
 // Finds the appropriate exception catch after calling all method exit instrumentation functions.
 // Note that this might change the exception being thrown.
@@ -218,7 +209,10 @@
     }
 
     // Walk the stack to find catch handler.
-    CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this, /*skip*/already_popped);
+    CatchBlockStackVisitor visitor(self_, context_,
+                                   &exception_ref,
+                                   this,
+                                   /*skip_frames=*/already_popped);
     visitor.WalkStack(true);
     uint32_t new_pop_count = handler_frame_depth_;
     DCHECK_GE(new_pop_count, already_popped);
@@ -226,16 +220,15 @@
 
     // Figure out how many of those frames have instrumentation we need to remove (Should be the
     // exact same as number of new_pop_count if there aren't inlined frames).
-    InstrumentationStackVisitor instrumentation_visitor(self_, handler_frame_depth_);
-    instrumentation_visitor.WalkStack(true);
-    size_t instrumentation_frames_to_pop = instrumentation_visitor.GetInstrumentationFramesToPop();
+    size_t instrumentation_frames_to_pop =
+        GetInstrumentationFramesToPop(self_, handler_frame_depth_);
 
     if (kDebugExceptionDelivery) {
       if (*handler_quick_frame_ == nullptr) {
         LOG(INFO) << "Handler is upcall";
       }
       if (handler_method_ != nullptr) {
-        const DexFile* dex_file = handler_method_->GetDeclaringClass()->GetDexCache()->GetDexFile();
+        const DexFile* dex_file = handler_method_->GetDexFile();
         int line_number = annotations::GetLineNumFromPC(dex_file, handler_method_, handler_dex_pc_);
         LOG(INFO) << "Handler: " << handler_method_->PrettyMethod() << " (line: "
                   << line_number << ")";
@@ -402,6 +395,8 @@
   bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     ArtMethod* method = GetMethod();
+    VLOG(deopt) << "Deoptimizing stack: depth: " << GetFrameDepth()
+                << " at method " << ArtMethod::PrettyMethod(method);
     if (method == nullptr || single_frame_done_) {
       FinishStackWalk();
       return false;  // End stack walk.
@@ -598,13 +593,17 @@
 
   // Compiled code made an explicit deoptimization.
   ArtMethod* deopt_method = visitor.GetSingleFrameDeoptMethod();
+  SCOPED_TRACE << "Deoptimizing "
+               <<  deopt_method->PrettyMethod()
+               << ": " << GetDeoptimizationKindName(kind);
+
   DCHECK(deopt_method != nullptr);
   if (VLOG_IS_ON(deopt) || kDebugExceptionDelivery) {
     LOG(INFO) << "Single-frame deopting: "
               << deopt_method->PrettyMethod()
               << " due to "
               << GetDeoptimizationKindName(kind);
-    DumpFramesWithType(self_, /* details */ true);
+    DumpFramesWithType(self_, /* details= */ true);
   }
   if (Runtime::Current()->UseJitCompilation()) {
     Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
@@ -642,10 +641,8 @@
   DCHECK(is_deoptimization_) << "Non-deoptimization handlers should use FindCatch";
   uintptr_t return_pc = 0;
   if (method_tracing_active_) {
-    InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
-    visitor.WalkStack(true);
-
-    size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
+    size_t instrumentation_frames_to_pop =
+        GetInstrumentationFramesToPop(self_, handler_frame_depth_);
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     return_pc = instrumentation->PopFramesForDeoptimization(self_, instrumentation_frames_to_pop);
   }
@@ -666,53 +663,41 @@
   UNREACHABLE();
 }
 
-// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor final : public StackVisitor {
- public:
-  explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        show_details_(show_details) {}
-
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* method = GetMethod();
-    if (show_details_) {
-      LOG(INFO) << "|> pc   = " << std::hex << GetCurrentQuickFramePc();
-      LOG(INFO) << "|> addr = " << std::hex << reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
-      if (GetCurrentQuickFrame() != nullptr && method != nullptr) {
-        LOG(INFO) << "|> ret  = " << std::hex << GetReturnPc();
-      }
-    }
-    if (method == nullptr) {
-      // Transition, do go on, we want to unwind over bridges, all the way.
-      if (show_details_) {
-        LOG(INFO) << "N  <transition>";
-      }
-      return true;
-    } else if (method->IsRuntimeMethod()) {
-      if (show_details_) {
-        LOG(INFO) << "R  " << method->PrettyMethod(true);
-      }
-      return true;
-    } else {
-      bool is_shadow = GetCurrentShadowFrame() != nullptr;
-      LOG(INFO) << (is_shadow ? "S" : "Q")
-                << ((!is_shadow && IsInInlinedFrame()) ? "i" : " ")
-                << " "
-                << method->PrettyMethod(true);
-      return true;  // Go on.
-    }
-  }
-
- private:
-  bool show_details_;
-
-  DISALLOW_COPY_AND_ASSIGN(DumpFramesWithTypeStackVisitor);
-};
-
 void QuickExceptionHandler::DumpFramesWithType(Thread* self, bool details) {
-  DumpFramesWithTypeStackVisitor visitor(self, details);
-  visitor.WalkStack(true);
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* method = stack_visitor->GetMethod();
+        if (details) {
+          LOG(INFO) << "|> pc   = " << std::hex << stack_visitor->GetCurrentQuickFramePc();
+          LOG(INFO) << "|> addr = " << std::hex
+              << reinterpret_cast<uintptr_t>(stack_visitor->GetCurrentQuickFrame());
+          if (stack_visitor->GetCurrentQuickFrame() != nullptr && method != nullptr) {
+            LOG(INFO) << "|> ret  = " << std::hex << stack_visitor->GetReturnPc();
+          }
+        }
+        if (method == nullptr) {
+          // Transition, do go on, we want to unwind over bridges, all the way.
+          if (details) {
+            LOG(INFO) << "N  <transition>";
+          }
+          return true;
+        } else if (method->IsRuntimeMethod()) {
+          if (details) {
+            LOG(INFO) << "R  " << method->PrettyMethod(true);
+          }
+          return true;
+        } else {
+          bool is_shadow = stack_visitor->GetCurrentShadowFrame() != nullptr;
+          LOG(INFO) << (is_shadow ? "S" : "Q")
+                    << ((!is_shadow && stack_visitor->IsInInlinedFrame()) ? "i" : " ")
+                    << " "
+                    << method->PrettyMethod(true);
+          return true;  // Go on.
+        }
+      },
+      self,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
 }
 
 }  // namespace art
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 672303a..1bcbcff 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -19,7 +19,6 @@
 
 #include "read_barrier.h"
 
-#include "base/utils.h"
 #include "gc/accounting/read_barrier_table.h"
 #include "gc/collector/concurrent_copying-inl.h"
 #include "gc/heap.h"
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 0741da6..3b89377 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -19,8 +19,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/runtime_debug.h"
 #include "gc_root.h"
 #include "jni.h"
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index d62cbdb..45f5633 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -277,7 +277,7 @@
     size_t identical;
 
     SummaryElement() : equiv(0), identical(0) {}
-    SummaryElement(SummaryElement&& ref) {
+    SummaryElement(SummaryElement&& ref) noexcept {
       root = ref.root;
       equiv = ref.equiv;
       identical = ref.identical;
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 6af5ca5..6388944 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -23,7 +23,7 @@
 #include <vector>
 
 #include "base/allocator.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 1d54d21..2acb2c7 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -26,6 +26,8 @@
 #include "dex/primitive.h"
 #include "handle_scope-inl.h"
 #include "mirror/array-inl.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/string.h"
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index 9fe4bca..8ad61f0 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -21,7 +21,6 @@
 
 #include "android-base/stringprintf.h"
 
-#include "base/utils.h"
 #include "common_throws.h"
 #include "dex/descriptors_names.h"
 #include "dex/primitive.h"
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 646de75..dbf40f6a 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -33,6 +33,7 @@
 #include "nth_caller_visitor.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack_reference.h"
+#include "thread-inl.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -226,7 +227,7 @@
                                     ArtMethod* m,
                                     Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    const DexFile::TypeList* classes = m->GetParameterTypeList();
+    const dex::TypeList* classes = m->GetParameterTypeList();
     // Set receiver if non-null (method is not static)
     if (receiver != nullptr) {
       Append(receiver);
@@ -367,7 +368,7 @@
 
 void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::TypeList* params = m->GetParameterTypeList();
+  const dex::TypeList* params = m->GetParameterTypeList();
   if (params == nullptr) {
     return;  // No arguments so nothing to check.
   }
@@ -461,7 +462,7 @@
 bool CheckArgsForInvokeMethod(ArtMethod* np_method,
                               ObjPtr<mirror::ObjectArray<mirror::Object>> objects)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  const DexFile::TypeList* classes = np_method->GetParameterTypeList();
+  const dex::TypeList* classes = np_method->GetParameterTypeList();
   uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
   uint32_t arg_count = (objects == nullptr) ? 0 : objects->GetLength();
   if (UNLIKELY(arg_count != classes_size)) {
@@ -886,32 +887,31 @@
 
   JValue boxed_value;
   ObjPtr<mirror::Class> klass = o->GetClass();
-  ObjPtr<mirror::Class> src_class = nullptr;
-  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  Primitive::Type primitive_type;
   ArtField* primitive_field = &klass->GetIFieldsPtr()->At(0);
   if (klass->DescriptorEquals("Ljava/lang/Boolean;")) {
-    src_class = class_linker->FindPrimitiveClass('Z');
+    primitive_type = Primitive::kPrimBoolean;
     boxed_value.SetZ(primitive_field->GetBoolean(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Byte;")) {
-    src_class = class_linker->FindPrimitiveClass('B');
+    primitive_type = Primitive::kPrimByte;
     boxed_value.SetB(primitive_field->GetByte(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Character;")) {
-    src_class = class_linker->FindPrimitiveClass('C');
+    primitive_type = Primitive::kPrimChar;
     boxed_value.SetC(primitive_field->GetChar(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Float;")) {
-    src_class = class_linker->FindPrimitiveClass('F');
+    primitive_type = Primitive::kPrimFloat;
     boxed_value.SetF(primitive_field->GetFloat(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Double;")) {
-    src_class = class_linker->FindPrimitiveClass('D');
+    primitive_type = Primitive::kPrimDouble;
     boxed_value.SetD(primitive_field->GetDouble(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Integer;")) {
-    src_class = class_linker->FindPrimitiveClass('I');
+    primitive_type = Primitive::kPrimInt;
     boxed_value.SetI(primitive_field->GetInt(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Long;")) {
-    src_class = class_linker->FindPrimitiveClass('J');
+    primitive_type = Primitive::kPrimLong;
     boxed_value.SetJ(primitive_field->GetLong(o));
   } else if (klass->DescriptorEquals("Ljava/lang/Short;")) {
-    src_class = class_linker->FindPrimitiveClass('S');
+    primitive_type = Primitive::kPrimShort;
     boxed_value.SetS(primitive_field->GetShort(o));
   } else {
     std::string temp;
@@ -923,7 +923,8 @@
   }
 
   return ConvertPrimitiveValue(unbox_for_result,
-                               src_class->GetPrimitiveType(), dst_class->GetPrimitiveType(),
+                               primitive_type,
+                               dst_class->GetPrimitiveType(),
                                boxed_value, unboxed_value);
 }
 
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 74580a2..574e302 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_REFLECTION_H_
 #define ART_RUNTIME_REFLECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/primitive.h"
 #include "jni.h"
 #include "obj_ptr.h"
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 424ee06..9fab7fb 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -25,6 +25,7 @@
 #include "dex/descriptors_names.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "mirror/class-alloc-inl.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "scoped_thread_state_change-inl.h"
 
@@ -33,7 +34,7 @@
 // TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
 class ReflectionTest : public CommonCompilerTest {
  protected:
-  virtual void SetUp() {
+  void SetUp() override {
     CommonCompilerTest::SetUp();
 
     vm_ = Runtime::Current()->GetJavaVM();
@@ -73,7 +74,7 @@
     }
   }
 
-  virtual void TearDown() {
+  void TearDown() override {
     CleanUpJniEnv();
     CommonCompilerTest::TearDown();
   }
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 374591e..2ffaf98 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -22,9 +22,13 @@
 #include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
+#include "base/casts.h"
+#include "base/mutex.h"
 #include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
+#include "interpreter/mterp/mterp.h"
 #include "obj_ptr-inl.h"
+#include "thread_list.h"
 
 namespace art {
 
@@ -82,7 +86,16 @@
 
 inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  return reinterpret_cast<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
+  return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
+}
+
+template<typename Action>
+void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) {
+  MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
+  lamda();
+  Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+      thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+  }, nullptr);
 }
 
 }  // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6878cc0..26f21b0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -34,6 +34,7 @@
 #include <cstdio>
 #include <cstdlib>
 #include <limits>
+#include <thread>
 #include <vector>
 
 #include "android-base/strings.h"
@@ -41,6 +42,7 @@
 #include "aot_class_linker.h"
 #include "arch/arm/registers_arm.h"
 #include "arch/arm64/registers_arm64.h"
+#include "arch/context.h"
 #include "arch/instruction_set_features.h"
 #include "arch/mips/registers_mips.h"
 #include "arch/mips64/registers_mips64.h"
@@ -49,7 +51,6 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "asm_support.h"
-#include "asm_support_check.h"
 #include "base/aborting.h"
 #include "base/arena_allocator.h"
 #include "base/atomic.h"
@@ -62,6 +63,7 @@
 #include "base/mutex.h"
 #include "base/os.h"
 #include "base/quasi_atomic.h"
+#include "base/sdk_version.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
@@ -86,7 +88,7 @@
 #include "hidden_api.h"
 #include "image-inl.h"
 #include "instrumentation.h"
-#include "intern_table.h"
+#include "intern_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
@@ -96,6 +98,7 @@
 #include "linear_alloc.h"
 #include "memory_representation.h"
 #include "mirror/array.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
 #include "mirror/class_loader.h"
@@ -166,6 +169,11 @@
 #include <android/set_abort_message.h>
 #endif
 
+// Static asserts to check the values of generated assembly-support macros.
+#define ASM_DEFINE(NAME, EXPR) static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
+#include "asm_defines.def"
+#undef ASM_DEFINE
+
 namespace art {
 
 // If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
@@ -227,8 +235,8 @@
       class_linker_(nullptr),
       signal_catcher_(nullptr),
       java_vm_(nullptr),
-      fault_message_lock_("Fault message lock"),
-      fault_message_(""),
+      thread_pool_ref_count_(0u),
+      fault_message_(nullptr),
       threads_being_born_(0),
       shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
       shutting_down_(false),
@@ -248,7 +256,7 @@
       preinitialization_transactions_(),
       verify_(verifier::VerifyMode::kNone),
       allow_dex_file_fallback_(true),
-      target_sdk_version_(kUnsetSdkVersion),
+      target_sdk_version_(static_cast<uint32_t>(SdkVersion::kUnset)),
       implicit_null_checks_(false),
       implicit_so_checks_(false),
       implicit_suspend_checks_(false),
@@ -257,16 +265,16 @@
       is_native_bridge_loaded_(false),
       is_native_debuggable_(false),
       async_exceptions_thrown_(false),
+      non_standard_exits_enabled_(false),
       is_java_debuggable_(false),
       zygote_max_failed_boots_(0),
       experimental_flags_(ExperimentalFlags::kNone),
       oat_file_manager_(nullptr),
       is_low_memory_mode_(false),
       safe_mode_(false),
-      hidden_api_policy_(hiddenapi::EnforcementPolicy::kNoChecks),
-      pending_hidden_api_warning_(false),
+      hidden_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
+      core_platform_api_policy_(hiddenapi::EnforcementPolicy::kJustWarn),
       dedupe_hidden_api_warnings_(true),
-      always_set_hidden_api_warning_flag_(false),
       hidden_api_access_event_log_rate_(0),
       dump_native_stack_on_sig_quit_(true),
       pruned_dalvik_cache_(false),
@@ -277,7 +285,6 @@
   static_assert(Runtime::kCalleeSaveSize ==
                     static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
 
-  CheckAsmSupportOffsetsAndSizes();
   std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
   interpreter::CheckInterpreterAsmConstants();
   callbacks_.reset(new RuntimeCallbacks());
@@ -301,15 +308,15 @@
     // Very few things are actually capable of distinguishing between the peer & peerless states so
     // this should be fine.
     bool thread_attached = AttachCurrentThread("Shutdown thread",
-                                               /* as_daemon */ false,
+                                               /* as_daemon= */ false,
                                                GetSystemThreadGroup(),
-                                               /* Create peer */ IsStarted());
+                                               /* create_peer= */ IsStarted());
     if (UNLIKELY(!thread_attached)) {
       LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
       CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
-                                /* as_daemon */   false,
-                                /* thread_group*/ nullptr,
-                                /* Create peer */ false));
+                                /* as_daemon= */   false,
+                                /* thread_group=*/ nullptr,
+                                /* create_peer= */ false));
     }
     self = Thread::Current();
   } else {
@@ -317,14 +324,45 @@
   }
 
   if (dump_gc_performance_on_shutdown_) {
+    heap_->CalculatePreGcWeightedAllocatedBytes();
+    uint64_t process_cpu_end_time = ProcessCpuNanoTime();
     ScopedLogSeverity sls(LogSeverity::INFO);
     // This can't be called from the Heap destructor below because it
     // could call RosAlloc::InspectAll() which needs the thread_list
     // to be still alive.
     heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
+
+    uint64_t process_cpu_time = process_cpu_end_time - heap_->GetProcessCpuStartTime();
+    uint64_t gc_cpu_time = heap_->GetTotalGcCpuTime();
+    float ratio = static_cast<float>(gc_cpu_time) / process_cpu_time;
+    LOG_STREAM(INFO) << "GC CPU time " << PrettyDuration(gc_cpu_time)
+        << " out of process CPU time " << PrettyDuration(process_cpu_time)
+        << " (" << ratio << ")"
+        << "\n";
+    double pre_gc_weighted_allocated_bytes =
+        heap_->GetPreGcWeightedAllocatedBytes() / process_cpu_time;
+    // Here we don't use process_cpu_time for normalization, because VM shutdown is not a real
+    // GC. Both numerator and denominator only cover the time up to the end of the last GC,
+    // instead of the whole process lifetime like pre_gc_weighted_allocated_bytes.
+    double post_gc_weighted_allocated_bytes =
+        heap_->GetPostGcWeightedAllocatedBytes() /
+          (heap_->GetPostGCLastProcessCpuTime() - heap_->GetProcessCpuStartTime());
+
+    LOG_STREAM(INFO) << "Average bytes allocated at GC start, weighted by CPU time between GCs: "
+        << static_cast<uint64_t>(pre_gc_weighted_allocated_bytes)
+        << " (" <<  PrettySize(pre_gc_weighted_allocated_bytes)  << ")";
+    LOG_STREAM(INFO) << "Average bytes allocated at GC end, weighted by CPU time between GCs: "
+        << static_cast<uint64_t>(post_gc_weighted_allocated_bytes)
+        << " (" <<  PrettySize(post_gc_weighted_allocated_bytes)  << ")"
+        << "\n";
   }
 
+  WaitForThreadPoolWorkersToStart();
+
   if (jit_ != nullptr) {
+    // Wait for the workers to be created since there can't be any threads attaching during
+    // shutdown.
+    jit_->WaitForWorkersToBeCreated();
     // Stop the profile saver thread before marking the runtime as shutting down.
     // The saver will try to dump the profiles before being sopped and that
     // requires holding the mutator lock.
@@ -373,6 +411,8 @@
     // JIT compiler threads.
     jit_->DeleteThreadPool();
   }
+  DeleteThreadPool();
+  CHECK(thread_pool_ == nullptr);
 
   // Make sure our internal threads are dead before we start tearing down things they're using.
   GetRuntimeCallbacks()->StopDebugger();
@@ -402,6 +442,7 @@
   if (jit_ != nullptr) {
     VLOG(jit) << "Deleting jit";
     jit_.reset(nullptr);
+    jit_code_cache_.reset(nullptr);
   }
 
   // Shutdown the fault manager if it was initialized.
@@ -523,18 +564,18 @@
 void Runtime::Abort(const char* msg) {
   auto old_value = gAborting.fetch_add(1);  // set before taking any locks
 
-#ifdef ART_TARGET_ANDROID
+  // Only set the first abort message.
   if (old_value == 0) {
-    // Only set the first abort message.
-    android_set_abort_message(msg);
-  }
-#else
-  UNUSED(old_value);
-#endif
-
 #ifdef ART_TARGET_ANDROID
-  android_set_abort_message(msg);
+    android_set_abort_message(msg);
+#else
+    // Set the runtime fault message in case our unexpected-signal code will run.
+    Runtime* current = Runtime::Current();
+    if (current != nullptr) {
+      current->SetFaultMessage(msg);
+    }
 #endif
+  }
 
   // Ensure that we don't have multiple threads trying to abort at once,
   // which would result in significantly worse diagnostics.
@@ -575,9 +616,18 @@
 }
 
 void Runtime::PreZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PreZygoteFork();
+  }
   heap_->PreZygoteFork();
 }
 
+void Runtime::PostZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PostZygoteFork();
+  }
+}
+
 void Runtime::CallExitHook(jint status) {
   if (exit_ != nullptr) {
     ScopedThreadStateChange tsc(Thread::Current(), kNative);
@@ -610,7 +660,7 @@
                            bool ignore_unrecognized,
                            RuntimeArgumentMap* runtime_options) {
   Locks::Init();
-  InitLogging(/* argv */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
+  InitLogging(/* argv= */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
   bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
   if (!parsed) {
     LOG(ERROR) << "Failed to parse options";
@@ -695,15 +745,6 @@
   return env->NewGlobalRef(system_class_loader.get());
 }
 
-std::string Runtime::GetPatchoatExecutable() const {
-  if (!patchoat_executable_.empty()) {
-    return patchoat_executable_;
-  }
-  std::string patchoat_executable(GetAndroidRoot());
-  patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
-  return patchoat_executable;
-}
-
 std::string Runtime::GetCompilerExecutable() const {
   if (!compiler_executable_.empty()) {
     return compiler_executable_;
@@ -748,7 +789,7 @@
 
   self->TransitionFromRunnableToSuspended(kNative);
 
-  started_ = true;
+  DoAndMaybeSwitchInterpreter([=](){ started_ = true; });
 
   if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
     ScopedObjectAccess soa(self);
@@ -790,17 +831,13 @@
   // recoding profiles. Maybe we should consider changing the name to be more clear it's
   // not only about compiling. b/28295073.
   if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+    // Try to load compiler pre zygote to reduce PSS. b/27744947
     std::string error_msg;
-    if (!IsZygote()) {
-    // If we are the zygote then we need to wait until after forking to create the code cache
-    // due to SELinux restrictions on r/w/x memory regions.
-      CreateJit();
-    } else if (jit_options_->UseJitCompilation()) {
-      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
-        // Try to load compiler pre zygote to reduce PSS. b/27744947
-        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
-      }
+    if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+      LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
     }
+    CreateJitCodeCache(/*rwx_memory_allowed=*/true);
+    CreateJit();
   }
 
   // Send the start phase event. We have to wait till here as this is when the main thread peer
@@ -820,7 +857,7 @@
         ? NativeBridgeAction::kInitialize
         : NativeBridgeAction::kUnload;
     InitNonZygoteOrPostFork(self->GetJniEnv(),
-                            /* is_system_server */ false,
+                            /* is_system_server= */ false,
                             action,
                             GetInstructionSetString(kRuntimeISA));
   }
@@ -896,29 +933,32 @@
     }
   }
 
-  // Create the thread pools.
-  heap_->CreateThreadPool();
-  // Reset the gc performance data at zygote fork so that the GCs
-  // before fork aren't attributed to an app.
-  heap_->ResetGcPerformanceInfo();
-
-  // We may want to collect profiling samples for system server, but we never want to JIT there.
   if (is_system_server) {
-    jit_options_->SetUseJitCompilation(false);
     jit_options_->SetSaveProfilingInfo(profile_system_server);
     if (profile_system_server) {
       jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
       VLOG(profiler) << "Enabling system server profiles";
     }
   }
-  if (!safe_mode_ &&
-      (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
-      jit_ == nullptr) {
-    // Note that when running ART standalone (not zygote, nor zygote fork),
-    // the jit may have already been created.
-    CreateJit();
+
+  // Create the thread pools.
+  heap_->CreateThreadPool();
+  {
+    ScopedTrace timing("CreateThreadPool");
+    constexpr size_t kStackSize = 64 * KB;
+    constexpr size_t kMaxRuntimeWorkers = 4u;
+    const size_t num_workers =
+        std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+    MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+    CHECK(thread_pool_ == nullptr);
+    thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
+    thread_pool_->StartWorkers(Thread::Current());
   }
 
+  // Reset the gc performance data at zygote fork so that the GCs
+  // before fork aren't attributed to an app.
+  heap_->ResetGcPerformanceInfo();
+
   StartSignalCatcher();
 
   // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
@@ -959,127 +999,12 @@
   VLOG(startup) << "Runtime::StartDaemonThreads exiting";
 }
 
-// Attempts to open dex files from image(s). Given the image location, try to find the oat file
-// and open it to get the stored dex file. If the image is the first for a multi-image boot
-// classpath, go on and also open the other images.
-static bool OpenDexFilesFromImage(const std::string& image_location,
-                                  std::vector<std::unique_ptr<const DexFile>>* dex_files,
-                                  size_t* failures) {
-  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
-
-  // Use a work-list approach, so that we can easily reuse the opening code.
-  std::vector<std::string> image_locations;
-  image_locations.push_back(image_location);
-
-  for (size_t index = 0; index < image_locations.size(); ++index) {
-    std::string system_filename;
-    bool has_system = false;
-    std::string cache_filename_unused;
-    bool dalvik_cache_exists_unused;
-    bool has_cache_unused;
-    bool is_global_cache_unused;
-    bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
-                                                                kRuntimeISA,
-                                                                &system_filename,
-                                                                &has_system,
-                                                                &cache_filename_unused,
-                                                                &dalvik_cache_exists_unused,
-                                                                &has_cache_unused,
-                                                                &is_global_cache_unused);
-
-    if (!found_image || !has_system) {
-      return false;
-    }
-
-    // We are falling back to non-executable use of the oat file because patching failed, presumably
-    // due to lack of space.
-    std::string vdex_filename =
-        ImageHeader::GetVdexLocationFromImageLocation(system_filename.c_str());
-    std::string oat_filename =
-        ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
-    std::string oat_location =
-        ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
-    // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
-    //       that here.
-    if (android::base::EndsWith(oat_location, ".jar")) {
-      oat_location.replace(oat_location.length() - 3, 3, "oat");
-    }
-    std::string error_msg;
-
-    std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
-                                                       false /* writable */,
-                                                       false /* low_4gb */,
-                                                       false, /* unquicken */
-                                                       &error_msg));
-    if (vdex_file.get() == nullptr) {
-      return false;
-    }
-
-    std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
-    if (file.get() == nullptr) {
-      return false;
-    }
-    std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
-                                                    false /* writable */,
-                                                    false /* program_header_only */,
-                                                    false /* low_4gb */,
-                                                    &error_msg));
-    if (elf_file.get() == nullptr) {
-      return false;
-    }
-    std::unique_ptr<const OatFile> oat_file(
-        OatFile::OpenWithElfFile(/* zip_fd */ -1,
-                                 elf_file.release(),
-                                 vdex_file.release(),
-                                 oat_location,
-                                 nullptr,
-                                 &error_msg));
-    if (oat_file == nullptr) {
-      LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
-      return false;
-    }
-
-    for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
-      if (oat_dex_file == nullptr) {
-        *failures += 1;
-        continue;
-      }
-      std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
-      if (dex_file.get() == nullptr) {
-        *failures += 1;
-      } else {
-        dex_files->push_back(std::move(dex_file));
-      }
-    }
-
-    if (index == 0) {
-      // First file. See if this is a multi-image environment, and if so, enqueue the other images.
-      const OatHeader& boot_oat_header = oat_file->GetOatHeader();
-      const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
-      if (boot_cp != nullptr) {
-        gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
-                                                          boot_cp,
-                                                          &image_locations);
-      }
-    }
-
-    Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
-  }
-  return true;
-}
-
-
-static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
-                           const std::vector<std::string>& dex_locations,
-                           const std::string& image_location,
+static size_t OpenDexFiles(ArrayRef<const std::string> dex_filenames,
+                           ArrayRef<const std::string> dex_locations,
                            std::vector<std::unique_ptr<const DexFile>>* dex_files) {
   DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
   size_t failure_count = 0;
-  if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
-    return failure_count;
-  }
   const ArtDexFileLoader dex_file_loader;
-  failure_count = 0;
   for (size_t i = 0; i < dex_filenames.size(); i++) {
     const char* dex_filename = dex_filenames[i].c_str();
     const char* dex_location = dex_locations[i].c_str();
@@ -1109,6 +1034,10 @@
   sentinel_ = GcRoot<mirror::Object>(sentinel);
 }
 
+GcRoot<mirror::Object> Runtime::GetSentinel() {
+  return sentinel_;
+}
+
 static inline void CreatePreAllocatedException(Thread* self,
                                                Runtime* runtime,
                                                GcRoot<mirror::Throwable>* exception,
@@ -1122,7 +1051,7 @@
   CHECK(klass != nullptr);
   gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
   ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
-      klass->Alloc</* kIsInstrumented */ true>(self, allocator_type));
+      klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
   CHECK(exception_object != nullptr);
   *exception = GcRoot<mirror::Throwable>(exception_object);
   // Initialize the "detailMessage" field.
@@ -1132,7 +1061,7 @@
   ArtField* detailMessageField =
       throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
   CHECK(detailMessageField != nullptr);
-  detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message);
+  detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
 }
 
 bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
@@ -1165,8 +1094,10 @@
                                                  reinterpret_cast<uint8_t*>(kSentinelAddr),
                                                  kPageSize,
                                                  PROT_NONE,
-                                                 /* low_4g */ true,
-                                                 /* error_msg */ nullptr);
+                                                 /*low_4gb=*/ true,
+                                                 /*reuse=*/ false,
+                                                 /*reservation=*/ nullptr,
+                                                 /*error_msg=*/ nullptr);
     if (!protected_fault_page_.IsValid()) {
       LOG(WARNING) << "Could not reserve sentinel fault page";
     } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
@@ -1185,12 +1116,49 @@
   Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
                 runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
 
-  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  image_location_ = runtime_options.GetOrDefault(Opt::Image);
+  SetInstructionSet(runtime_options.GetOrDefault(Opt::ImageInstructionSet));
+  boot_class_path_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  boot_class_path_locations_ = runtime_options.ReleaseOrDefault(Opt::BootClassPathLocations);
+  DCHECK(boot_class_path_locations_.empty() ||
+         boot_class_path_locations_.size() == boot_class_path_.size());
+  if (boot_class_path_.empty()) {
+    // Try to extract the boot class path from the system boot image.
+    if (image_location_.empty()) {
+      LOG(ERROR) << "Empty boot class path, cannot continue without image.";
+      return false;
+    }
+    std::string system_oat_filename = ImageHeader::GetOatLocationFromImageLocation(
+        GetSystemImageFilename(image_location_.c_str(), instruction_set_));
+    std::string system_oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location_);
+    std::string error_msg;
+    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                    system_oat_filename,
+                                                    system_oat_location,
+                                                    /*executable=*/ false,
+                                                    /*low_4gb=*/ false,
+                                                    /*abs_dex_location=*/ nullptr,
+                                                    /*reservation=*/ nullptr,
+                                                    &error_msg));
+    if (oat_file == nullptr) {
+      LOG(ERROR) << "Could not open boot oat file for extracting boot class path: " << error_msg;
+      return false;
+    }
+    const OatHeader& oat_header = oat_file->GetOatHeader();
+    const char* oat_boot_class_path = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+    if (oat_boot_class_path != nullptr) {
+      Split(oat_boot_class_path, ':', &boot_class_path_);
+    }
+    if (boot_class_path_.empty()) {
+      LOG(ERROR) << "Boot class path missing from boot image oat file " << oat_file->GetLocation();
+      return false;
+    }
+  }
+
   class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
   properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
 
   compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
-  patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
   must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
   is_zygote_ = runtime_options.Exists(Opt::Zygote);
   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
@@ -1212,7 +1180,6 @@
     }
   }
   image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
-  image_location_ = runtime_options.GetOrDefault(Opt::Image);
 
   max_spins_before_thin_lock_inflation_ =
       runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
@@ -1237,8 +1204,8 @@
   // As is, we're encoding some logic here about which specific policy to use, which would be better
   // controlled by the framework.
   hidden_api_policy_ = do_hidden_api_checks
-      ? hiddenapi::EnforcementPolicy::kDarkGreyAndBlackList
-      : hiddenapi::EnforcementPolicy::kNoChecks;
+      ? hiddenapi::EnforcementPolicy::kEnabled
+      : hiddenapi::EnforcementPolicy::kDisabled;
 
   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
   force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1273,6 +1240,10 @@
             kExtraDefaultHeapGrowthMultiplier;
   }
   XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
+
+  // Generational CC collection is currently only compatible with Baker read barriers.
+  bool use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc;
+
   heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
                        runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
                        runtime_options.GetOrDefault(Opt::HeapMinFree),
@@ -1281,8 +1252,10 @@
                        foreground_heap_growth_multiplier,
                        runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
-                       runtime_options.GetOrDefault(Opt::Image),
-                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
+                       GetBootClassPath(),
+                       GetBootClassPathLocations(),
+                       image_location_,
+                       instruction_set_,
                        // Override the collector type to CC if the read barrier config.
                        kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
                        kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
@@ -1305,7 +1278,10 @@
                        xgc_option.gcstress_,
                        xgc_option.measure_,
                        runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
-                       runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
+                       use_generational_cc,
+                       runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs),
+                       runtime_options.Exists(Opt::DumpRegionInfoBeforeGC),
+                       runtime_options.Exists(Opt::DumpRegionInfoAfterGC));
 
   if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
     LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
@@ -1354,7 +1330,6 @@
     }
     case JdwpProvider::kUnset: {
       LOG(FATAL) << "Illegal jdwp provider " << jdwp_provider_ << " was not filtered out!";
-      break;
     }
   }
   callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
@@ -1377,13 +1352,13 @@
     arena_pool_.reset(new MallocArenaPool());
     jit_arena_pool_.reset(new MallocArenaPool());
   } else {
-    arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false));
-    jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata"));
+    arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false));
+    jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata"));
   }
 
   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
     // 4gb, no malloc. Explanation in header.
-    low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true));
+    low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true));
   }
   linear_alloc_.reset(CreateLinearAlloc());
 
@@ -1458,7 +1433,7 @@
   CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
   CHECK(self != nullptr);
 
-  self->SetCanCallIntoJava(!IsAotCompiler());
+  self->SetIsRuntimeThread(IsAotCompiler());
 
   // Set us to runnable so tools using a runtime can allocate and GC by default
   self->TransitionFromSuspendedToRunnable();
@@ -1483,47 +1458,43 @@
         image_space->VerifyImageAllocations();
       }
     }
-    if (boot_class_path_string_.empty()) {
-      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
-      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
-      std::vector<std::string> dex_locations;
-      dex_locations.reserve(boot_class_path.size());
-      for (const DexFile* dex_file : boot_class_path) {
-        dex_locations.push_back(dex_file->GetLocation());
-      }
-      boot_class_path_string_ = android::base::Join(dex_locations, ':');
-    }
     {
       ScopedTrace trace2("AddImageStringsToTable");
-      GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
+      for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
+        GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
+      }
+    }
+    if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) {
+      // The boot image did not contain all boot class path components. Load the rest.
+      DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size());
+      size_t start = heap_->GetBootImageSpaces().size();
+      DCHECK_LT(start, GetBootClassPath().size());
+      std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path;
+      if (runtime_options.Exists(Opt::BootClassPathDexList)) {
+        extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
+      } else {
+        OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start),
+                     ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start),
+                     &extra_boot_class_path);
+      }
+      class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
     }
     if (IsJavaDebuggable()) {
       // Now that we have loaded the boot image, deoptimize its methods if we are running
       // debuggable, as the code may have been compiled non-debuggable.
+      ScopedThreadSuspension sts(self, ThreadState::kNative);
+      ScopedSuspendAll ssa(__FUNCTION__);
       DeoptimizeBootImage();
     }
   } else {
-    std::vector<std::string> dex_filenames;
-    Split(boot_class_path_string_, ':', &dex_filenames);
-
-    std::vector<std::string> dex_locations;
-    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
-      dex_locations = dex_filenames;
-    } else {
-      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
-      CHECK_EQ(dex_filenames.size(), dex_locations.size());
-    }
-
     std::vector<std::unique_ptr<const DexFile>> boot_class_path;
     if (runtime_options.Exists(Opt::BootClassPathDexList)) {
       boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
     } else {
-      OpenDexFiles(dex_filenames,
-                   dex_locations,
-                   runtime_options.GetOrDefault(Opt::Image),
+      OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()),
+                   ArrayRef<const std::string>(GetBootClassPathLocations()),
                    &boot_class_path);
     }
-    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
     if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
       LOG(ERROR) << "Could not initialize without image: " << error_msg;
       return false;
@@ -1612,10 +1583,14 @@
   // Runtime initialization is largely done now.
   // We load plugins first since that can modify the runtime state slightly.
   // Load all plugins
-  for (auto& plugin : plugins_) {
-    std::string err;
-    if (!plugin.Load(&err)) {
-      LOG(FATAL) << plugin << " failed to load: " << err;
+  {
+    // The init methods of plugins expect the state of the thread to be non-runnable.
+    ScopedThreadSuspension sts(self, ThreadState::kNative);
+    for (auto& plugin : plugins_) {
+      std::string err;
+      if (!plugin.Load(&err)) {
+        LOG(FATAL) << plugin << " failed to load: " << err;
+      }
     }
   }
 
@@ -1777,7 +1752,8 @@
   // libcore can't because it's the library that implements System.loadLibrary!
   {
     std::string error_msg;
-    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &error_msg)) {
+    if (!java_vm_->LoadNativeLibrary(
+          env, "libjavacore.so", nullptr, WellKnownClasses::java_lang_Object, &error_msg)) {
       LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
     }
   }
@@ -1786,7 +1762,8 @@
                                                 ? "libopenjdkd.so"
                                                 : "libopenjdk.so";
     std::string error_msg;
-    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, &error_msg)) {
+    if (!java_vm_->LoadNativeLibrary(
+          env, kOpenJdkLibrary, nullptr, WellKnownClasses::java_lang_Object, &error_msg)) {
       LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
     }
   }
@@ -1970,7 +1947,7 @@
     return 0;  // backward compatibility
   default:
     LOG(FATAL) << "Unknown statistic " << kind;
-    return -1;  // unreachable
+    UNREACHABLE();
   }
 }
 
@@ -2152,7 +2129,7 @@
     method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
   }
   // Create empty conflict table.
-  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc),
                               pointer_size);
   return method;
 }
@@ -2284,7 +2261,7 @@
     LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
     return;
   }
-  if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) {
+  if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
     LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
     return;
   }
@@ -2462,8 +2439,27 @@
 }
 
 void Runtime::SetFaultMessage(const std::string& message) {
-  MutexLock mu(Thread::Current(), fault_message_lock_);
-  fault_message_ = message;
+  std::string* new_msg = new std::string(message);
+  std::string* cur_msg = fault_message_.exchange(new_msg);
+  delete cur_msg;
+}
+
+std::string Runtime::GetFaultMessage() {
+  // Retrieve the message. Temporarily replace with null so that SetFaultMessage will not delete
+  // the string in parallel.
+  std::string* cur_msg = fault_message_.exchange(nullptr);
+
+  // Make a copy of the string.
+  std::string ret = cur_msg == nullptr ? "" : *cur_msg;
+
+  // Put the message back if it hasn't been updated.
+  std::string* null_str = nullptr;
+  if (!fault_message_.compare_exchange_strong(null_str, cur_msg)) {
+    // Already replaced.
+    delete cur_msg;
+  }
+
+  return ret;
 }
 
 void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
@@ -2479,27 +2475,64 @@
   instruction_set += GetInstructionSetString(kRuntimeISA);
   argv->push_back(instruction_set);
 
-  std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
-  std::string feature_string("--instruction-set-features=");
-  feature_string += features->GetFeatureString();
-  argv->push_back(feature_string);
+  if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+    argv->push_back("--instruction-set-features=runtime");
+  } else {
+    std::unique_ptr<const InstructionSetFeatures> features(
+        InstructionSetFeatures::FromCppDefines());
+    std::string feature_string("--instruction-set-features=");
+    feature_string += features->GetFeatureString();
+    argv->push_back(feature_string);
+  }
 }
 
-void Runtime::CreateJit() {
-  CHECK(!IsAotCompiler());
+void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
   if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
     DCHECK(!jit_options_->UseJitCompilation());
   }
-  std::string error_msg;
-  jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
-  if (jit_.get() == nullptr) {
-    LOG(WARNING) << "Failed to create JIT " << error_msg;
+
+  if (!jit_options_->UseJitCompilation() && !jit_options_->GetSaveProfilingInfo()) {
     return;
   }
+
+  std::string error_msg;
+  bool profiling_only = !jit_options_->UseJitCompilation();
+  jit_code_cache_.reset(jit::JitCodeCache::Create(profiling_only,
+                                                  rwx_memory_allowed,
+                                                  IsZygote(),
+                                                  &error_msg));
+  if (jit_code_cache_.get() == nullptr) {
+    LOG(WARNING) << "Failed to create JIT Code Cache: " << error_msg;
+  }
+}
+
+void Runtime::CreateJit() {
+  DCHECK(jit_ == nullptr);
+  if (jit_code_cache_.get() == nullptr) {
+    if (!IsSafeMode()) {
+      LOG(WARNING) << "Missing code cache, cannot create JIT.";
+    }
+    return;
+  }
+  if (IsSafeMode()) {
+    LOG(INFO) << "Not creating JIT because of SafeMode.";
+    jit_code_cache_.reset();
+    return;
+  }
+
+  jit::Jit* jit = jit::Jit::Create(jit_code_cache_.get(), jit_options_.get());
+  DoAndMaybeSwitchInterpreter([=](){ jit_.reset(jit); });
+  if (jit == nullptr) {
+    LOG(WARNING) << "Failed to allocate JIT";
+    // Release JIT code cache resources (several MB of memory).
+    jit_code_cache_.reset();
+  } else {
+    jit->CreateThreadPool();
+  }
 }
 
 bool Runtime::CanRelocate() const {
-  return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
+  return !IsAotCompiler();
 }
 
 bool Runtime::IsCompilingBootImage() const {
@@ -2523,12 +2556,12 @@
   const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
   if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
     imt_unimplemented_method_->SetImtConflictTable(
-        ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+        ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
         pointer_size);
   }
   if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
     imt_conflict_method_->SetImtConflictTable(
-          ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+          ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
           pointer_size);
   }
 }
@@ -2637,6 +2670,7 @@
       : instrumentation_(instrumentation) {}
 
   bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
+    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));
     auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
     for (auto& m : klass->GetMethods(pointer_size)) {
       const void* code = m.GetEntryPointFromQuickCompiledCode();
@@ -2663,9 +2697,54 @@
   // we patch entry points of methods in boot image to interpreter bridge, as
   // boot image code may be AOT compiled as not debuggable.
   if (!GetInstrumentation()->IsForcedInterpretOnly()) {
-    ScopedObjectAccess soa(Thread::Current());
     UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
     GetClassLinker()->VisitClasses(&visitor);
+    jit::Jit* jit = GetJit();
+    if (jit != nullptr) {
+      // Code JITted by the zygote is not compiled debuggable.
+      jit->GetCodeCache()->ClearEntryPointsInZygoteExecSpace();
+    }
   }
 }
+
+Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage()
+    : thread_pool_(Runtime::Current()->AcquireThreadPool()) {}
+
+Runtime::ScopedThreadPoolUsage::~ScopedThreadPoolUsage() {
+  Runtime::Current()->ReleaseThreadPool();
+}
+
+bool Runtime::DeleteThreadPool() {
+  // Make sure workers are started to prevent thread shutdown errors.
+  WaitForThreadPoolWorkersToStart();
+  std::unique_ptr<ThreadPool> thread_pool;
+  {
+    MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+    if (thread_pool_ref_count_ == 0) {
+      thread_pool = std::move(thread_pool_);
+    }
+  }
+  return thread_pool != nullptr;
+}
+
+ThreadPool* Runtime::AcquireThreadPool() {
+  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+  ++thread_pool_ref_count_;
+  return thread_pool_.get();
+}
+
+void Runtime::ReleaseThreadPool() {
+  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+  CHECK_GT(thread_pool_ref_count_, 0u);
+  --thread_pool_ref_count_;
+}
+
+void Runtime::WaitForThreadPoolWorkersToStart() {
+  // Need to make sure workers are created before deleting the pool.
+  ScopedThreadPoolUsage stpu;
+  if (stpu.GetThreadPool() != nullptr) {
+    stpu.GetThreadPool()->WaitForWorkersToBeCreated();
+  }
+}
+
 }  // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f0bf754..ee2c514 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -27,10 +27,9 @@
 #include <memory>
 #include <vector>
 
-#include "arch/instruction_set.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
 #include "experimental_flags.h"
@@ -56,6 +55,7 @@
 
 namespace jit {
 class Jit;
+class JitCodeCache;
 class JitOptions;
 }  // namespace jit
 
@@ -83,6 +83,7 @@
 class ClassLinker;
 class CompilerCallbacks;
 class DexFile;
+enum class InstructionSet;
 class InternTable;
 class IsMarkedVisitor;
 class JavaVMExt;
@@ -90,6 +91,7 @@
 class MonitorList;
 class MonitorPool;
 class NullPointerHandler;
+class OatFileAssistantTest;
 class OatFileManager;
 class Plugin;
 struct RuntimeArgumentMap;
@@ -98,6 +100,7 @@
 class StackOverflowHandler;
 class SuspensionHandler;
 class ThreadList;
+class ThreadPool;
 class Trace;
 struct TraceConfig;
 class Transaction;
@@ -164,7 +167,6 @@
   }
 
   std::string GetCompilerExecutable() const;
-  std::string GetPatchoatExecutable() const;
 
   const std::vector<std::string>& GetCompilerOptions() const {
     return compiler_options_;
@@ -242,8 +244,14 @@
 
   ~Runtime();
 
-  const std::string& GetBootClassPathString() const {
-    return boot_class_path_string_;
+  const std::vector<std::string>& GetBootClassPath() const {
+    return boot_class_path_;
+  }
+
+  const std::vector<std::string>& GetBootClassPathLocations() const {
+    DCHECK(boot_class_path_locations_.empty() ||
+           boot_class_path_locations_.size() == boot_class_path_.size());
+    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
   }
 
   const std::string& GetClassPathString() const {
@@ -400,7 +408,7 @@
   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
+  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
     return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
   }
 
@@ -444,6 +452,7 @@
   bool UseJitCompilation() const;
 
   void PreZygoteFork();
+  void PostZygoteFork();
   void InitNonZygoteOrPostFork(
       JNIEnv* env,
       bool is_system_server,
@@ -510,12 +519,7 @@
   void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
-  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
-  // with the unexpected_signal_lock_.
-  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
-    return fault_message_;
-  }
+  void SetFaultMessage(const std::string& message);
 
   void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
 
@@ -535,8 +539,12 @@
     return hidden_api_policy_;
   }
 
-  void SetPendingHiddenApiWarning(bool value) {
-    pending_hidden_api_warning_ = value;
+  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
+    core_platform_api_policy_ = policy;
+  }
+
+  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
+    return core_platform_api_policy_;
   }
 
   void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
@@ -547,10 +555,6 @@
     return hidden_api_exemptions_;
   }
 
-  bool HasPendingHiddenApiWarning() const {
-    return pending_hidden_api_warning_;
-  }
-
   void SetDedupeHiddenApiWarnings(bool value) {
     dedupe_hidden_api_warnings_ = value;
   }
@@ -559,14 +563,6 @@
     return dedupe_hidden_api_warnings_;
   }
 
-  void AlwaysSetHiddenApiWarningFlag() {
-    always_set_hidden_api_warning_flag_ = true;
-  }
-
-  bool ShouldAlwaysSetHiddenApiWarningFlag() const {
-    return always_set_hidden_api_warning_flag_;
-  }
-
   void SetHiddenApiEventLogSampleRate(uint32_t rate) {
     hidden_api_access_event_log_rate_ = rate;
   }
@@ -599,11 +595,11 @@
     return is_running_on_memory_tool_;
   }
 
-  void SetTargetSdkVersion(int32_t version) {
+  void SetTargetSdkVersion(uint32_t version) {
     target_sdk_version_ = version;
   }
 
-  int32_t GetTargetSdkVersion() const {
+  uint32_t GetTargetSdkVersion() const {
     return target_sdk_version_;
   }
 
@@ -615,6 +611,8 @@
     return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
   }
 
+  void CreateJitCodeCache(bool rwx_memory_allowed);
+
   // Create the JIT and instrumentation and code cache.
   void CreateJit();
 
@@ -645,7 +643,7 @@
   void SetJavaDebuggable(bool value);
 
   // Deoptimize the boot image, called for Java debuggable apps.
-  void DeoptimizeBootImage();
+  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);
 
   bool IsNativeDebuggable() const {
     return is_native_debuggable_;
@@ -655,14 +653,33 @@
     is_native_debuggable_ = value;
   }
 
+  bool AreNonStandardExitsEnabled() const {
+    return non_standard_exits_enabled_;
+  }
+
+  void SetNonStandardExitsEnabled() {
+    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
+  }
+
   bool AreAsyncExceptionsThrown() const {
     return async_exceptions_thrown_;
   }
 
   void SetAsyncExceptionsThrown() {
-    async_exceptions_thrown_ = true;
+    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
   }
 
+  // Change state and re-check which interpreter should be used.
+  //
+  // This must be called whenever there is an event that forces
+  // us to use different interpreter (e.g. debugger is attached).
+  //
+  // Changing the state using the lambda gives us some multithreading safety.
+  // It ensures that two calls do not interfere with each other and
+  // it makes it possible to DCHECK that thread local flag is correct.
+  template<typename Action>
+  static void DoAndMaybeSwitchInterpreter(Action lamda);
+
   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
   std::string GetFingerprint() {
     return fingerprint_;
@@ -670,6 +687,9 @@
 
   // Called from class linker.
   void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
+  // For testing purpose only.
+  // TODO: Remove this when this is no longer needed (b/116087961).
+  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
   LinearAlloc* CreateLinearAlloc();
@@ -682,6 +702,10 @@
   double GetHashTableMinLoadFactor() const;
   double GetHashTableMaxLoadFactor() const;
 
+  bool IsSafeMode() const {
+    return safe_mode_;
+  }
+
   void SetSafeMode(bool mode) {
     safe_mode_ = mode;
   }
@@ -773,12 +797,32 @@
     return jdwp_provider_;
   }
 
-  static constexpr int32_t kUnsetSdkVersion = 0u;
-
   uint32_t GetVerifierLoggingThresholdMs() const {
     return verifier_logging_threshold_ms_;
   }
 
+  // Atomically delete the thread pool if the reference count is 0.
+  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+  // Wait for all the thread workers to be attached.
+  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+  // Scoped usage of the runtime thread pool. Prevents the pool from being
+  // deleted. Note that the thread pool is only for startup and gets deleted after.
+  class ScopedThreadPoolUsage {
+   public:
+    ScopedThreadPoolUsage();
+    ~ScopedThreadPoolUsage();
+
+    // Return the thread pool.
+    ThreadPool* GetThreadPool() const {
+      return thread_pool_;
+    }
+
+   private:
+    ThreadPool* const thread_pool_;
+  };
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -809,6 +853,15 @@
   void VisitConstantRoots(RootVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
+  //       As such, there is a window where a call will return an empty string. In general,
+  //       only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
+  //       friend).
+  std::string GetFaultMessage();
+
+  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
   // A pointer to the active runtime or null.
   static Runtime* instance_;
 
@@ -845,12 +898,12 @@
   bool image_dex2oat_enabled_;
 
   std::string compiler_executable_;
-  std::string patchoat_executable_;
   std::vector<std::string> compiler_options_;
   std::vector<std::string> image_compiler_options_;
   std::string image_location_;
 
-  std::string boot_class_path_string_;
+  std::vector<std::string> boot_class_path_;
+  std::vector<std::string> boot_class_path_locations_;
   std::string class_path_string_;
   std::vector<std::string> properties_;
 
@@ -889,11 +942,16 @@
   std::unique_ptr<JavaVMExt> java_vm_;
 
   std::unique_ptr<jit::Jit> jit_;
+  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
   std::unique_ptr<jit::JitOptions> jit_options_;
 
-  // Fault message, printed when we get a SIGSEGV.
-  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::string fault_message_ GUARDED_BY(fault_message_lock_);
+  // Runtime thread pool. The pool is only for startup and gets deleted after.
+  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+
+  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
+  // lock-free, so needs to be atomic.
+  std::atomic<std::string*> fault_message_;
 
   // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
   // the shutdown lock so that threads aren't born while we're shutting down.
@@ -955,7 +1013,7 @@
   std::vector<std::string> cpu_abilist_;
 
   // Specifies target SDK version to allow workarounds for certain API levels.
-  int32_t target_sdk_version_;
+  uint32_t target_sdk_version_;
 
   // Implicit checks flags.
   bool implicit_null_checks_;       // NullPointer checks are implicit.
@@ -963,7 +1021,7 @@
   bool implicit_suspend_checks_;    // Thread suspension checks are implicit.
 
   // Whether or not the sig chain (and implicitly the fault handler) should be
-  // disabled. Tools like dex2oat or patchoat don't need them. This enables
+  // disabled. Tools like dex2oat don't need them. This enables
   // building a statically link version of dex2oat.
   bool no_sig_chain_;
 
@@ -988,6 +1046,10 @@
   // MterpShouldSwitchInterpreters function.
   bool async_exceptions_thrown_;
 
+  // Whether anything is going to be using the shadow-frame APIs to force a function to return
+  // early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
+  bool non_standard_exits_enabled_;
+
   // Whether Java code needs to be debuggable.
   bool is_java_debuggable_;
 
@@ -1021,23 +1083,17 @@
   // Whether access checks on hidden API should be performed.
   hiddenapi::EnforcementPolicy hidden_api_policy_;
 
+  // Whether access checks on core platform API should be performed.
+  hiddenapi::EnforcementPolicy core_platform_api_policy_;
+
   // List of signature prefixes of methods that have been removed from the blacklist, and treated
   // as if whitelisted.
   std::vector<std::string> hidden_api_exemptions_;
 
-  // Whether the application has used an API which is not restricted but we
-  // should issue a warning about it.
-  bool pending_hidden_api_warning_;
-
   // Do not warn about the same hidden API access violation twice.
   // This is only used for testing.
   bool dedupe_hidden_api_warnings_;
 
-  // Hidden API can print warnings into the log and/or set a flag read by the
-  // framework to show a UI warning. If this flag is set, always set the flag
-  // when there is a warning. This is only used for testing.
-  bool always_set_hidden_api_warning_flag_;
-
   // How often to log hidden API access to the event log. An integer between 0
   // (never) and 0x10000 (always).
   uint32_t hidden_api_access_event_log_rate_;
@@ -1089,6 +1145,11 @@
 
   uint32_t verifier_logging_threshold_ms_;
 
+  // Note: See comments on GetFaultMessage.
+  friend std::string GetFaultMessageForAbortLogging();
+  friend class ScopedThreadPoolUsage;
+  friend class OatFileAssistantTest;
+
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 4bd3b3a..55ba293 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -30,8 +30,8 @@
   HandleUnexpectedSignalCommon(signal_number,
                                info,
                                raw_context,
-                               /* handle_timeout_signal */ false,
-                               /* dump_on_stderr */ false);
+                               /* handle_timeout_signal= */ false,
+                               /* dump_on_stderr= */ false);
 
   // Run the old signal handler.
   old_action.sa_sigaction(signal_number, info, raw_context);
@@ -44,7 +44,7 @@
   if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
     InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
                                      &old_action,
-                                     /* handle_timeout_signal */ false);
+                                     /* handle_timeout_signal= */ false);
   }
 }
 
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index 758917c..da13eb8 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -151,6 +151,26 @@
   Remove(cb, &monitor_callbacks_);
 }
 
+void RuntimeCallbacks::ThreadParkStart(bool is_absolute, int64_t timeout) {
+  for (ParkCallback * cb : park_callbacks_) {
+    cb->ThreadParkStart(is_absolute, timeout);
+  }
+}
+
+void RuntimeCallbacks::ThreadParkFinished(bool timeout) {
+  for (ParkCallback * cb : park_callbacks_) {
+    cb->ThreadParkFinished(timeout);
+  }
+}
+
+void RuntimeCallbacks::AddParkCallback(ParkCallback* cb) {
+  park_callbacks_.push_back(cb);
+}
+
+void RuntimeCallbacks::RemoveParkCallback(ParkCallback* cb) {
+  Remove(cb, &park_callbacks_);
+}
+
 void RuntimeCallbacks::RemoveThreadLifecycleCallback(ThreadLifecycleCallback* cb) {
   Remove(cb, &thread_callbacks_);
 }
@@ -185,14 +205,14 @@
                                       Handle<mirror::Class> temp_class,
                                       Handle<mirror::ClassLoader> loader,
                                       const DexFile& initial_dex_file,
-                                      const DexFile::ClassDef& initial_class_def,
+                                      const dex::ClassDef& initial_class_def,
                                       /*out*/DexFile const** final_dex_file,
-                                      /*out*/DexFile::ClassDef const** final_class_def) {
+                                      /*out*/dex::ClassDef const** final_class_def) {
   DexFile const* current_dex_file = &initial_dex_file;
-  DexFile::ClassDef const* current_class_def = &initial_class_def;
+  dex::ClassDef const* current_class_def = &initial_class_def;
   for (ClassLoadCallback* cb : class_callbacks_) {
     DexFile const* new_dex_file = nullptr;
-    DexFile::ClassDef const* new_class_def = nullptr;
+    dex::ClassDef const* new_class_def = nullptr;
     cb->ClassPreDefine(descriptor,
                        temp_class,
                        loader,
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 9f0410d..41d552a 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -20,13 +20,16 @@
 #include <vector>
 
 #include "base/array_ref.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
-#include "dex/dex_file.h"
 #include "handle.h"
 
 namespace art {
 
+namespace dex {
+struct ClassDef;
+}  // namespace dex
+
 namespace mirror {
 class Class;
 class ClassLoader;
@@ -35,6 +38,7 @@
 
 class ArtMethod;
 class ClassLoadCallback;
+class DexFile;
 class Thread;
 class MethodCallback;
 class Monitor;
@@ -115,6 +119,19 @@
   virtual ~MonitorCallback() {}
 };
 
+class ParkCallback {
+ public:
+  // Called on entry to the Unsafe.#park method
+  virtual void ThreadParkStart(bool is_absolute, int64_t millis_timeout)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  // Called just after the thread has woken up from going to sleep for a park(). This will only be
+  // called for Unsafe.park() calls where the thread did (or at least could have) gone to sleep.
+  virtual void ThreadParkFinished(bool timed_out) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  virtual ~ParkCallback() {}
+};
+
 // A callback to let parts of the runtime note that they are currently relying on a particular
 // method remaining in it's current state. Users should not rely on always being called. If multiple
 // callbacks are added the runtime will short-circuit when the first one returns 'true'.
@@ -170,9 +187,9 @@
                       Handle<mirror::Class> temp_class,
                       Handle<mirror::ClassLoader> loader,
                       const DexFile& initial_dex_file,
-                      const DexFile::ClassDef& initial_class_def,
+                      const dex::ClassDef& initial_class_def,
                       /*out*/DexFile const** final_dex_file,
-                      /*out*/DexFile::ClassDef const** final_class_def)
+                      /*out*/dex::ClassDef const** final_class_def)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void AddMethodCallback(MethodCallback* cb) REQUIRES(Locks::mutator_lock_);
@@ -193,6 +210,11 @@
   void AddMonitorCallback(MonitorCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
   void RemoveMonitorCallback(MonitorCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void ThreadParkStart(bool is_absolute, int64_t timeout) REQUIRES_SHARED(Locks::mutator_lock_);
+  void ThreadParkFinished(bool timed_out) REQUIRES_SHARED(Locks::mutator_lock_);
+  void AddParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
+  void RemoveParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Returns true if some MethodInspectionCallback indicates the method is being inspected/depended
   // on by some code.
   bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -243,6 +265,8 @@
       GUARDED_BY(Locks::mutator_lock_);
   std::vector<MonitorCallback*> monitor_callbacks_
       GUARDED_BY(Locks::mutator_lock_);
+  std::vector<ParkCallback*> park_callbacks_
+      GUARDED_BY(Locks::mutator_lock_);
   std::vector<MethodInspectionCallback*> method_inspection_callbacks_
       GUARDED_BY(Locks::mutator_lock_);
   std::vector<DdmCallback*> ddm_callbacks_
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index aaedb23..df06a9f 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -147,6 +147,8 @@
   self->TransitionFromSuspendedToRunnable();
   bool started = runtime_->Start();
   ASSERT_TRUE(started);
+  // Make sure the workers are done starting so we don't get callbacks for them.
+  runtime_->WaitForThreadPoolWorkersToStart();
 
   cb_.state = CallbackState::kBase;  // Ignore main thread attach.
 
@@ -191,10 +193,9 @@
 TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
   std::string error_msg;
   MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
-                                      /* addr */ nullptr,
                                       128 * kPageSize,  // Just some small stack.
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb */ false,
+                                      /*low_4gb=*/ false,
                                       &error_msg);
   ASSERT_TRUE(stack.IsValid()) << error_msg;
 
@@ -256,9 +257,9 @@
                         Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
                         Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
                         const DexFile& initial_dex_file,
-                        const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+                        const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                         /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
-                        /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+                        /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       const std::string& location = initial_dex_file.GetLocation();
       std::string event =
@@ -458,20 +459,20 @@
       ref_ = { &k->GetDexFile(), k->GetDexClassDefIndex() };
     }
 
-    void MonitorContendedLocking(Monitor* mon ATTRIBUTE_UNUSED)
+    void MonitorContendedLocking(Monitor* mon ATTRIBUTE_UNUSED) override
         REQUIRES_SHARED(Locks::mutator_lock_) { }
 
-    void MonitorContendedLocked(Monitor* mon ATTRIBUTE_UNUSED)
+    void MonitorContendedLocked(Monitor* mon ATTRIBUTE_UNUSED) override
         REQUIRES_SHARED(Locks::mutator_lock_) { }
 
-    void ObjectWaitStart(Handle<mirror::Object> obj, int64_t millis ATTRIBUTE_UNUSED)
+    void ObjectWaitStart(Handle<mirror::Object> obj, int64_t millis ATTRIBUTE_UNUSED) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       if (IsInterestingObject(obj.Get())) {
         saw_wait_start_ = true;
       }
     }
 
-    void MonitorWaitFinished(Monitor* m, bool timed_out ATTRIBUTE_UNUSED)
+    void MonitorWaitFinished(Monitor* m, bool timed_out ATTRIBUTE_UNUSED) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       if (IsInterestingObject(m->GetObject())) {
         saw_wait_finished_ = true;
@@ -505,10 +506,10 @@
           self,
           // Just a random class
           soa.Decode<mirror::Class>(WellKnownClasses::java_util_Collections).Ptr(),
-          /*ms*/0,
-          /*ns*/0,
-          /*interruptShouldThrow*/false,
-          /*why*/kWaiting);
+          /*ms=*/0,
+          /*ns=*/0,
+          /*interruptShouldThrow=*/false,
+          /*why=*/kWaiting);
     }
   }
   ASSERT_TRUE(cb_.saw_wait_start_);
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index eae2505..5676577 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -371,6 +371,11 @@
 #pragma GCC diagnostic ignored "-Wframe-larger-than="
 #endif
 
+std::string GetFaultMessageForAbortLogging() {
+  Runtime* runtime = Runtime::Current();
+  return  (runtime != nullptr) ? runtime->GetFaultMessage() : "";
+}
+
 static void HandleUnexpectedSignalCommonDump(int signal_number,
                                              siginfo_t* info,
                                              void* raw_context,
@@ -427,9 +432,9 @@
     }
 
     if (dump_on_stderr) {
-      std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+      std::cerr << "Fault message: " << GetFaultMessageForAbortLogging() << std::endl;
     } else {
-      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << GetFaultMessageForAbortLogging();
     }
   }
 }
diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h
new file mode 100644
index 0000000..81d350b
--- /dev/null
+++ b/runtime/runtime_globals.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_GLOBALS_H_
+#define ART_RUNTIME_RUNTIME_GLOBALS_H_
+
+#include "base/globals.h"
+
+namespace art {
+
+// Size of Dex virtual registers.
+static constexpr size_t kVRegSize = 4;
+
+// Returns whether the given memory offset can be used for generating
+// an implicit null check.
+static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
+  return offset < kPageSize;
+}
+
+// Required object alignment
+static constexpr size_t kObjectAlignmentShift = 3;
+static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;
+static constexpr size_t kLargeObjectAlignment = kPageSize;
+
+// Garbage collector constants.
+static constexpr bool kMovingCollector = true;
+static constexpr bool kMarkCompactSupport = false && kMovingCollector;
+// True if we allow moving classes.
+static constexpr bool kMovingClasses = !kMarkCompactSupport;
+// When using the Concurrent Copying (CC) collector, if
+// `ART_USE_GENERATIONAL_CC` is true, enable generational collection by default,
+// i.e. use sticky-bit CC for minor collections and (full) CC for major
+// collections.
+// This default value can be overridden with the runtime option
+// `-Xgc:[no]generational_cc`.
+//
+// TODO(b/67628039): Consider either:
+// - renaming this to a better descriptive name (e.g.
+//   `ART_USE_GENERATIONAL_CC_BY_DEFAULT`); or
+// - removing `ART_USE_GENERATIONAL_CC` and having a fixed default value.
+// Any of these changes will require adjusting users of this preprocessor
+// directive and the corresponding build system environment variable (e.g. in
+// ART's continuous testing).
+#ifdef ART_USE_GENERATIONAL_CC
+static constexpr bool kEnableGenerationalCCByDefault = true;
+#else
+static constexpr bool kEnableGenerationalCCByDefault = false;
+#endif
+
+// If true, enable the tlab allocator by default.
+#ifdef ART_USE_TLAB
+static constexpr bool kUseTlab = true;
+#else
+static constexpr bool kUseTlab = false;
+#endif
+
+// Kinds of tracing clocks.
+enum class TraceClockSource {
+  kThreadCpu,
+  kWall,
+  kDual,  // Both wall and thread CPU clocks.
+};
+
+#if defined(__linux__)
+static constexpr TraceClockSource kDefaultTraceClockSource = TraceClockSource::kDual;
+#else
+static constexpr TraceClockSource kDefaultTraceClockSource = TraceClockSource::kWall;
+#endif
+
+static constexpr bool kDefaultMustRelocate = true;
+
+// Size of a heap reference.
+static constexpr size_t kHeapReferenceSize = sizeof(uint32_t);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_RUNTIME_GLOBALS_H_
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 6313553..cfa8ea6 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -31,8 +31,8 @@
   HandleUnexpectedSignalCommon(signal_number,
                                info,
                                raw_context,
-                               /* handle_timeout_signal */ true,
-                               /* dump_on_stderr */ true);
+                               /* handle_timeout_signal= */ true,
+                               /* dump_on_stderr= */ true);
 
   if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
     pid_t tid = GetTid();
@@ -77,7 +77,7 @@
   // On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
   InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
                                    nullptr,
-                                   /* handle_timeout_signal */ true);
+                                   /* handle_timeout_signal= */ true);
 }
 
 }  // namespace art
diff --git a/runtime/runtime_options.cc b/runtime/runtime_options.cc
index f8c680d..12dab15 100644
--- a/runtime/runtime_options.cc
+++ b/runtime/runtime_options.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 
+#include "base/sdk_version.h"
 #include "base/utils.h"
 #include "debugger.h"
 #include "gc/heap.h"
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ae1e08f..222c821 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -37,7 +37,7 @@
 RUNTIME_OPTIONS_KEY (Unit,                Zygote)
 RUNTIME_OPTIONS_KEY (Unit,                Help)
 RUNTIME_OPTIONS_KEY (Unit,                ShowVersion)
-RUNTIME_OPTIONS_KEY (std::string,         BootClassPath)
+RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPath)           // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPathLocations)  // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (std::string,         ClassPath)
 RUNTIME_OPTIONS_KEY (std::string,         Image)
@@ -64,6 +64,8 @@
 RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
                                           ThreadSuspendTimeout,           ThreadList::kDefaultThreadSuspendTimeout)
 RUNTIME_OPTIONS_KEY (Unit,                DumpGCPerformanceOnShutdown)
+RUNTIME_OPTIONS_KEY (Unit,                DumpRegionInfoBeforeGC)
+RUNTIME_OPTIONS_KEY (Unit,                DumpRegionInfoAfterGC)
 RUNTIME_OPTIONS_KEY (Unit,                DumpJITInfoOnShutdown)
 RUNTIME_OPTIONS_KEY (Unit,                IgnoreMaxFootprint)
 RUNTIME_OPTIONS_KEY (Unit,                LowMemoryMode)
@@ -116,7 +118,8 @@
                                           ImageCompilerOptions)  // -Ximage-compiler-option ...
 RUNTIME_OPTIONS_KEY (verifier::VerifyMode, \
                                           Verify,                         verifier::VerifyMode::kEnable)
-RUNTIME_OPTIONS_KEY (int,                 TargetSdkVersion,               Runtime::kUnsetSdkVersion)
+RUNTIME_OPTIONS_KEY (unsigned int,        TargetSdkVersion, \
+                                          static_cast<unsigned int>(SdkVersion::kUnset))
 RUNTIME_OPTIONS_KEY (Unit,                HiddenApiChecks)
 RUNTIME_OPTIONS_KEY (std::string,         NativeBridge)
 RUNTIME_OPTIONS_KEY (unsigned int,        ZygoteMaxFailedBoots,           10)
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 3f5e776..39b44e7 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -23,7 +23,6 @@
 #include <vector>
 
 #include "arch/instruction_set.h"
-#include "base/logging.h"
 #include "base/variant_map.h"
 #include "cmdline_types.h"  // TODO: don't need to include this file here
 #include "gc/collector_type.h"
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 3089c24..2541ab5 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/mutex.h"
 #include "jni/jni_env_ext-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
diff --git a/runtime/scoped_thread_state_change.cc b/runtime/scoped_thread_state_change.cc
index edbce05..ae833b4 100644
--- a/runtime/scoped_thread_state_change.cc
+++ b/runtime/scoped_thread_state_change.cc
@@ -20,6 +20,7 @@
 
 #include "base/casts.h"
 #include "jni/java_vm_ext.h"
+#include "mirror/object-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime-inl.h"
 
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 0c42c5a..b2ad90a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -19,8 +19,8 @@
 
 #include "jni.h"
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "thread_state.h"
 
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index f4a27b8..38ea9cc 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -118,7 +118,7 @@
 
   ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput);
 
-  std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */));
+  std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage= */));
   bool success = file->WriteFully(s.data(), s.size());
   if (success) {
     success = file->FlushCloseOrErase() == 0;
@@ -169,7 +169,7 @@
 
 void SignalCatcher::HandleSigUsr1() {
   LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   ProfileSaver::ForceProcessProfiles();
 }
 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index eb9c661..80a563b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -139,9 +139,9 @@
     } else {
       uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
       uint32_t value = 0;
-      bool success = GetVReg(m, reg, kReferenceVReg, &value);
-      // We currently always guarantee the `this` object is live throughout the method.
-      CHECK(success) << "Failed to read the this object in " << ArtMethod::PrettyMethod(m);
+      if (!GetVReg(m, reg, kReferenceVReg, &value)) {
+        return nullptr;
+      }
       return reinterpret_cast<mirror::Object*>(value);
     }
   }
@@ -223,20 +223,39 @@
   switch (location_kind) {
     case DexRegisterLocation::Kind::kInStack: {
       const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
+      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
+      if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
+        return false;
+      }
       const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
       *val = *reinterpret_cast<const uint32_t*>(addr);
       return true;
     }
-    case DexRegisterLocation::Kind::kInRegister:
+    case DexRegisterLocation::Kind::kInRegister: {
+      uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
+      if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
+        return false;
+      }
+      return GetRegisterIfAccessible(reg, kind, val);
+    }
     case DexRegisterLocation::Kind::kInRegisterHigh:
     case DexRegisterLocation::Kind::kInFpuRegister:
     case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
+      if (kind == kReferenceVReg) {
+        return false;
+      }
       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
       return GetRegisterIfAccessible(reg, kind, val);
     }
-    case DexRegisterLocation::Kind::kConstant:
-      *val = dex_register_map[vreg].GetConstant();
+    case DexRegisterLocation::Kind::kConstant: {
+      uint32_t result = dex_register_map[vreg].GetConstant();
+      if (kind == kReferenceVReg && result != 0) {
+        return false;
+      }
+      *val = result;
       return true;
+    }
     case DexRegisterLocation::Kind::kNone:
       return false;
     default:
@@ -549,7 +568,9 @@
     cur_shadow_frame_->SetMethod(method);
   } else {
     DCHECK(cur_quick_frame_ != nullptr);
-    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod!";
+    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
+                               << GetMethod()->PrettyMethod() << " is inlined into "
+                               << GetOuterMethod()->PrettyMethod();
     *cur_quick_frame_ = method;
   }
 }
@@ -795,13 +816,14 @@
             // JNI methods cannot have any inlined frames.
             && !method->IsNative()) {
           DCHECK_NE(cur_quick_frame_pc_, 0u);
-          CodeInfo code_info(cur_oat_quick_method_header_, CodeInfo::DecodeFlags::InlineInfoOnly);
+          current_code_info_ = CodeInfo(cur_oat_quick_method_header_,
+                                        CodeInfo::DecodeFlags::InlineInfoOnly);
           uint32_t native_pc_offset =
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+          StackMap stack_map = current_code_info_.GetStackMapForNativePcOffset(native_pc_offset);
           if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
             DCHECK_EQ(current_inline_frames_.size(), 0u);
-            for (current_inline_frames_ = code_info.GetInlineInfosOf(stack_map);
+            for (current_inline_frames_ = current_code_info_.GetInlineInfosOf(stack_map);
                  !current_inline_frames_.empty();
                  current_inline_frames_.pop_back()) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index 02578d2..1f305d2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,8 +20,8 @@
 #include <stdint.h>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
 
@@ -143,6 +143,36 @@
   template <CountTransitions kCount = CountTransitions::kYes>
   void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Convenience helper function to walk the stack with a lambda as a visitor.
+  template <CountTransitions kCountTransitions = CountTransitions::kYes,
+            typename T>
+  ALWAYS_INLINE static void WalkStack(const T& fn,
+                                      Thread* thread,
+                                      Context* context,
+                                      StackWalkKind walk_kind,
+                                      bool check_suspended = true,
+                                      bool include_transitions = false)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    class LambdaStackVisitor : public StackVisitor {
+     public:
+      LambdaStackVisitor(const T& fn,
+                         Thread* thread,
+                         Context* context,
+                         StackWalkKind walk_kind,
+                         bool check_suspended = true)
+          : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}
+
+      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+        return fn_(this);
+      }
+
+     private:
+      T fn_;
+    };
+    LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
+    visitor.template WalkStack<kCountTransitions>(include_transitions);
+  }
+
   Thread* GetThread() const {
     return thread_;
   }
@@ -312,6 +342,7 @@
   size_t cur_depth_;
   // Current inlined frames of the method we are currently at.
   // We keep poping frames from the end as we visit the frames.
+  CodeInfo current_code_info_;
   BitTableRange<InlineInfo> current_inline_frames_;
 
  protected:
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 5f44286..87133cf 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -301,6 +301,8 @@
     InlineInfoOnly = 2,
   };
 
+  CodeInfo() {}
+
   explicit CodeInfo(const uint8_t* data, DecodeFlags flags = AllTables) {
     Decode(reinterpret_cast<const uint8_t*>(data), flags);
   }
@@ -358,7 +360,7 @@
   ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
     if (stack_map.HasDexRegisterMap()) {
       DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
-      DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map);
+      DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register= */ 0, &map);
       return map;
     }
     return DexRegisterMap(0, DexRegisterLocation::None());
@@ -445,8 +447,6 @@
   }
 
  private:
-  CodeInfo() {}
-
   // Returns lower bound (fist stack map which has pc greater or equal than the desired one).
   // It ignores catch stack maps at the end (it is the same as if they had maximum pc value).
   BitTable<StackMap>::const_iterator BinarySearchNativePc(uint32_t packed_pc) const;
@@ -486,10 +486,10 @@
     callback(&CodeInfo::dex_register_catalog_);
   }
 
-  uint32_t packed_frame_size_;  // Frame size in kStackAlignment units.
-  uint32_t core_spill_mask_;
-  uint32_t fp_spill_mask_;
-  uint32_t number_of_dex_registers_;
+  uint32_t packed_frame_size_ = 0;  // Frame size in kStackAlignment units.
+  uint32_t core_spill_mask_ = 0;
+  uint32_t fp_spill_mask_ = 0;
+  uint32_t number_of_dex_registers_ = 0;
   BitTable<StackMap> stack_maps_;
   BitTable<RegisterMask> register_masks_;
   BitTable<StackMask> stack_masks_;
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index aac547e..493ea85 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -20,7 +20,7 @@
 #include "subtype_check_bits_and_status.h"
 #include "subtype_check_info.h"
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/class.h"
 #include "runtime.h"
 
@@ -237,7 +237,7 @@
   static SubtypeCheckInfo::State EnsureInitialized(ClassPtr klass)
       REQUIRES(Locks::subtype_check_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    return InitializeOrAssign(klass, /*assign*/false).GetState();
+    return InitializeOrAssign(klass, /*assign=*/false).GetState();
   }
 
   // Force this class's SubtypeCheckInfo state into Assigned|Overflowed.
@@ -250,7 +250,7 @@
   static SubtypeCheckInfo::State EnsureAssigned(ClassPtr klass)
       REQUIRES(Locks::subtype_check_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    return InitializeOrAssign(klass, /*assign*/true).GetState();
+    return InitializeOrAssign(klass, /*assign=*/true).GetState();
   }
 
   // Resets the SubtypeCheckInfo into the Uninitialized state.
@@ -398,7 +398,7 @@
 
     // Force all ancestors to Assigned | Overflowed.
     ClassPtr parent_klass = GetParentClass(klass);
-    size_t parent_depth = InitializeOrAssign(parent_klass, /*assign*/true).GetDepth();
+    size_t parent_depth = InitializeOrAssign(parent_klass, /*assign=*/true).GetDepth();
     if (kIsDebugBuild) {
       SubtypeCheckInfo::State parent_state = GetSubtypeCheckInfo(parent_klass).GetState();
       DCHECK(parent_state == SubtypeCheckInfo::kAssigned ||
@@ -542,17 +542,17 @@
                                                    int32_t new_value)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
-      return klass->template CasField32</*kTransactionActive*/true>(offset,
-                                                                    old_value,
-                                                                    new_value,
-                                                                    CASMode::kWeak,
-                                                                    std::memory_order_seq_cst);
-    } else {
-      return klass->template CasField32</*kTransactionActive*/false>(offset,
+      return klass->template CasField32</*kTransactionActive=*/true>(offset,
                                                                      old_value,
                                                                      new_value,
                                                                      CASMode::kWeak,
                                                                      std::memory_order_seq_cst);
+    } else {
+      return klass->template CasField32</*kTransactionActive=*/false>(offset,
+                                                                      old_value,
+                                                                      new_value,
+                                                                      CASMode::kWeak,
+                                                                      std::memory_order_seq_cst);
     }
   }
 
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 462f203..23d8ac3 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -56,9 +56,9 @@
  *
  * See subtype_check.h and subtype_check_info.h for more details.
  */
-BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
-  BitStructField<BitString, /*lsb*/ 0> bitstring_;
-  BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
+BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u)
+  BitStructField<BitString, /*lsb=*/ 0> bitstring_;
+  BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBits);
 
 }  // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 321a723..eec6e21 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -68,11 +68,11 @@
 static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
 static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
 BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
-  BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+  BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_;
   BitStructField<ClassStatus,
-                 /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
-                 /*width*/ kClassStatusBitSize> status_;
-  BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
+                 /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
+                 /*width=*/ kClassStatusBitSize> status_;
+  BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
 
 // Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index e40bca5..44a2a69 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -86,11 +86,11 @@
 
 struct SubtypeCheckInfoTest : public ::testing::Test {
  protected:
-  virtual void SetUp() {
-    android::base::InitLogging(/*argv*/nullptr);
+  void SetUp() override {
+    android::base::InitLogging(/*argv=*/nullptr);
   }
 
-  virtual void TearDown() {
+  void TearDown() override {
   }
 
   static SubtypeCheckInfo MakeSubtypeCheckInfo(BitString path_to_root = {},
@@ -131,7 +131,7 @@
 
   // Create an SubtypeCheckInfo with the same depth, but with everything else reset.
   // Returns: SubtypeCheckInfo in the Uninitialized state.
-  static SubtypeCheckInfo CopyCleared(SubtypeCheckInfo sc) {
+  static SubtypeCheckInfo CopyCleared(const SubtypeCheckInfo& sc) {
     SubtypeCheckInfo cleared_copy{};
     cleared_copy.depth_ = sc.depth_;
     DCHECK_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy.GetState());
@@ -158,33 +158,33 @@
 
   // Illegal values during construction would cause a Dcheck failure and crash.
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/0u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/0u),
                GetExpectedMessageForDeathTest("Path was too long for the depth"));
   ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({1u, 1u}),
-                                           /*overflow*/false,
-                                           /*depth*/0u),
+                                           /*overflow=*/false,
+                                           /*depth=*/0u),
                GetExpectedMessageForDeathTest("Bitstring too long for depth"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/1u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/1u),
                GetExpectedMessageForDeathTest("Expected \\(Assigned\\|Initialized\\) "
                                               "state to have >0 Next value"));
   ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({0u, 2u, 1u}),
-                                           /*overflow*/false,
-                                           /*depth*/2u),
+                                           /*overflow=*/false,
+                                           /*depth=*/2u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 2u}),
-                                    /*next*/MakeBitStringChar(1u),
-                                    /*overflow*/false,
-                                    /*depth*/2u),
+                                    /*next=*/MakeBitStringChar(1u),
+                                    /*overflow=*/false,
+                                    /*depth=*/2u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 1u, 1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/3u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/3u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
 
   // These are really slow (~1sec per death test on host),
@@ -194,62 +194,62 @@
 TEST_F(SubtypeCheckInfoTest, States) {
   EXPECT_EQ(SubtypeCheckInfo::kUninitialized, MakeSubtypeCheckInfo().GetState());
   EXPECT_EQ(SubtypeCheckInfo::kInitialized,
-            MakeSubtypeCheckInfo(/*path*/{}, /*next*/MakeBitStringChar(1)).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/{}, /*next=*/MakeBitStringChar(1)).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
-            MakeSubtypeCheckInfo(/*path*/{},
-                                 /*next*/MakeBitStringChar(1),
-                                 /*overflow*/true,
-                                 /*depth*/1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/{},
+                                 /*next=*/MakeBitStringChar(1),
+                                 /*overflow=*/true,
+                                 /*depth=*/1u).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kAssigned,
-            MakeSubtypeCheckInfo(/*path*/MakeBitString({1u}),
-                                 /*next*/MakeBitStringChar(1),
-                                 /*overflow*/false,
-                                 /*depth*/1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitString({1u}),
+                                 /*next=*/MakeBitStringChar(1),
+                                 /*overflow=*/false,
+                                 /*depth=*/1u).GetState());
 
   // Test edge conditions: depth == BitString::kCapacity (No Next value).
   EXPECT_EQ(SubtypeCheckInfo::kAssigned,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/false,
-                                 /*depth*/BitString::kCapacity).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/false,
+                                 /*depth=*/BitString::kCapacity).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kInitialized,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax<BitString::kCapacity - 1u>(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/false,
-                                 /*depth*/BitString::kCapacity).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax<BitString::kCapacity - 1u>(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/false,
+                                 /*depth=*/BitString::kCapacity).GetState());
   // Test edge conditions: depth > BitString::kCapacity (Must overflow).
   EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/true,
-                                 /*depth*/BitString::kCapacity + 1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/true,
+                                 /*depth=*/BitString::kCapacity + 1u).GetState());
 }
 
 TEST_F(SubtypeCheckInfoTest, NextValue) {
   // Validate "Next" is correctly aliased as the Bitstring[Depth] character.
   EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/0u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/0u).GetNext());
   EXPECT_EQ(MakeBitStringChar(2u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/1u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/1u).GetNext());
   EXPECT_EQ(MakeBitStringChar(3u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/2u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/2u).GetNext());
   EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({0u, 2u, 1u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/2u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/2u).GetNext());
   // Test edge conditions: depth == BitString::kCapacity (No Next value).
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
-                                                     /*overflow*/false,
-                                                     /*depth*/BitString::kCapacity)));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/BitString::kCapacity)));
   // Anything with depth >= BitString::kCapacity has no next value.
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
-                                                     /*overflow*/false,
-                                                     /*depth*/BitString::kCapacity + 1u)));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/BitString::kCapacity + 1u)));
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax(),
-                                                     /*overflow*/false,
-                                                     /*depth*/std::numeric_limits<size_t>::max())));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/std::numeric_limits<size_t>::max())));
 }
 
 template <size_t kPos = BitString::kCapacity>
@@ -259,10 +259,10 @@
   using StorageType = BitString::StorageType;
 
   SubtypeCheckInfo sci =
-      MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
-                           /*next*/BitStringChar{},
-                           /*overflow*/false,
-                           /*depth*/BitString::kCapacity);
+      MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                           /*next=*/BitStringChar{},
+                           /*overflow=*/false,
+                           /*depth=*/BitString::kCapacity);
   // 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
   EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
 
@@ -275,8 +275,8 @@
 
   SubtypeCheckInfo sci2 =
       MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity);
 
 #define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
     (((pos0) << 0) | \
@@ -290,8 +290,8 @@
 
   SubtypeCheckInfo sci3 =
       MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity - 1u);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity - 1u);
 
   EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
             sci3.GetEncodedPathToRoot());
@@ -300,8 +300,8 @@
 
   SubtypeCheckInfo sci4 =
       MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity - 2u);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity - 2u);
 
   EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
   EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
@@ -320,7 +320,7 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -331,7 +331,7 @@
 
   // CopyCleared is just a thin wrapper around value-init and providing the depth.
   SubtypeCheckInfo cleared_copy_value =
-      SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth*/1u);
+      SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth=*/1u);
   EXPECT_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy_value.GetState());
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(cleared_copy_value));
 }
@@ -340,7 +340,7 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -350,17 +350,17 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childA = root.CreateChild(/*assign*/false);
+  SubtypeCheckInfo childA = root.CreateChild(/*assign_next=*/false);
   EXPECT_EQ(SubtypeCheckInfo::kInitialized, childA.GetState());
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());  // Next unchanged for Initialize.
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(childA));
 
-  SubtypeCheckInfo childB = root.CreateChild(/*assign*/false);
+  SubtypeCheckInfo childB = root.CreateChild(/*assign_next=*/false);
   EXPECT_EQ(SubtypeCheckInfo::kInitialized, childB.GetState());
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());  // Next unchanged for Initialize.
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(childB));
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -369,19 +369,19 @@
     size_t cur_depth = 1u;
     SubtypeCheckInfo latest_child = childC;
     while (cur_depth != BitString::kCapacity) {
-      latest_child = latest_child.CreateChild(/*assign*/true);
+      latest_child = latest_child.CreateChild(/*assign_next=*/true);
       ASSERT_EQ(SubtypeCheckInfo::kAssigned, latest_child.GetState());
       ASSERT_EQ(cur_depth + 1u, GetPathToRoot(latest_child).Length());
       cur_depth++;
     }
 
     // Future assignments will result in a too-deep overflow.
-    SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep.GetState());
     EXPECT_EQ(GetPathToRoot(latest_child), GetPathToRoot(child_of_deep));
 
     // Assignment of too-deep overflow also causes overflow.
-    SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep_2.GetState());
     EXPECT_EQ(GetPathToRoot(child_of_deep), GetPathToRoot(child_of_deep_2));
   }
@@ -393,7 +393,7 @@
         break;
       }
 
-      SubtypeCheckInfo child = root.CreateChild(/*assign*/true);
+      SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/true);
       ASSERT_EQ(SubtypeCheckInfo::kAssigned, child.GetState());
       ASSERT_EQ(MakeBitStringChar(cur_next+1u), root.GetNext());
       ASSERT_EQ(MakeBitString({cur_next}), GetPathToRoot(child));
@@ -403,20 +403,20 @@
     // Now the root will be in a state that further assigns will be too-wide overflow.
 
     // Initialization still succeeds.
-    SubtypeCheckInfo child = root.CreateChild(/*assign*/false);
+    SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/false);
     EXPECT_EQ(SubtypeCheckInfo::kInitialized, child.GetState());
     EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
     EXPECT_EQ(MakeBitString({}), GetPathToRoot(child));
 
     // Assignment goes to too-wide Overflow.
-    SubtypeCheckInfo child_of = root.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of = root.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of.GetState());
     EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
     EXPECT_EQ(MakeBitString({}), GetPathToRoot(child_of));
 
     // Assignment of overflowed child still succeeds.
     // The path to root is the same.
-    SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of2.GetState());
     EXPECT_EQ(GetPathToRoot(child_of), GetPathToRoot(child_of2));
   }
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 666bf81..719e5d9 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -301,19 +301,19 @@
 
 struct SubtypeCheckTest : public ::testing::Test {
  protected:
-  virtual void SetUp() {
-    android::base::InitLogging(/*argv*/nullptr);
+  void SetUp() override {
+    android::base::InitLogging(/*argv=*/nullptr);
 
     CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u);
   }
 
-  virtual void TearDown() {
+  void TearDown() override {
   }
 
   void CreateRootedTree(size_t width, size_t height) {
     all_classes_.clear();
-    root_ = CreateClassFor(/*parent*/nullptr, /*x*/0, /*y*/0);
-    CreateTreeFor(root_, /*width*/width, /*depth*/height);
+    root_ = CreateClassFor(/*parent=*/nullptr, /*x=*/0, /*y=*/0);
+    CreateTreeFor(root_, /*width=*/width, /*levels=*/height);
   }
 
   MockClass* CreateClassFor(MockClass* parent, size_t x, size_t y) {
@@ -681,7 +681,7 @@
     const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
   ASSERT_EQ(depth, transitions.size());
 
-  EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
+  EnsureStateChangedTestRecursive(root, /*cur_depth=*/0u, depth, transitions);
 }
 
 TEST_F(SubtypeCheckTest, EnsureInitialized_NoOverflow) {
@@ -869,8 +869,8 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
@@ -914,15 +914,15 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()) << *child;
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
 
     // Create too-wide children for a too-wide parent.
-    MockClass* child_subchild = child->FindChildAt(/*x*/0, kTargetDepth);
-    CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*depth*/1);
+    MockClass* child_subchild = child->FindChildAt(/*x=*/0, kTargetDepth);
+    CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOffSub*2, child_subchild->GetNumberOfChildren()) << *child_subchild;
     ASSERT_TRUE(IsTooWide(child_subchild->GetMaxChild())) << *(child_subchild->GetMaxChild());
   }
@@ -1035,8 +1035,8 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
@@ -1045,7 +1045,7 @@
     MockClass* child_subchild = child->GetMaxChild();
     ASSERT_TRUE(child_subchild != nullptr);
     ASSERT_EQ(0u, child_subchild->GetNumberOfChildren()) << *child_subchild;
-    CreateTreeFor(child_subchild, /*width*/1, /*levels*/kTooDeepTargetDepth);
+    CreateTreeFor(child_subchild, /*width=*/1, /*levels=*/kTooDeepTargetDepth);
     MockClass* too_deep_child = child_subchild->FindChildAt(0, kTooDeepTargetDepth + 2);
     ASSERT_TRUE(too_deep_child != nullptr) << child_subchild->ToDotGraph();
     ASSERT_TRUE(IsTooWide(too_deep_child)) << *(too_deep_child);
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index 4e75a4f..af2be10 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -17,13 +17,11 @@
 #ifndef ART_RUNTIME_SUSPEND_REASON_H_
 #define ART_RUNTIME_SUSPEND_REASON_H_
 
-#include <ostream>
+#include <iosfwd>
 
 namespace art {
 
 // The various reasons that we might be suspending a thread.
-// TODO Once kForDebugger is removed by removing the old debugger we should make the kForUserCode
-//      just a basic count for bookkeeping instead of linking it as directly with internal suspends.
 enum class SuspendReason {
   // Suspending for internal reasons (e.g. GC, stack trace, etc.).
   // TODO Split this into more descriptive sections.
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 91c27af..00f882e 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,6 +19,7 @@
 
 #include "thread.h"
 
+#include "arch/instruction_set.h"
 #include "base/aborting.h"
 #include "base/casts.h"
 #include "base/mutex-inl.h"
@@ -385,6 +386,7 @@
 }
 
 inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
+  new_top_frame->CheckConsistentVRegs();
   return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
 }
 
@@ -392,6 +394,26 @@
   return tlsPtr_.managed_stack.PopShadowFrame();
 }
 
+inline uint8_t* Thread::GetStackEndForInterpreter(bool implicit_overflow_check) const {
+  uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
+      ? GetStackOverflowReservedBytes(kRuntimeISA)
+          : 0);
+  if (kIsDebugBuild) {
+    // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
+    // potentially humongous stack size. We don't want to take too much of the stack regularly,
+    // so do not increase the regular reserved size (for compiled code etc) and only report the
+    // virtually smaller stack to the interpreter here.
+    end += GetStackOverflowReservedBytes(kRuntimeISA);
+  }
+  return end;
+}
+
+inline void Thread::ResetDefaultStackEnd() {
+  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
+  // to throw a StackOverflowError.
+  tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_THREAD_INL_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 19fe4ea..4828aae 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -16,10 +16,6 @@
 
 #include "thread.h"
 
-#if !defined(__APPLE__)
-#include <sched.h>
-#endif
-
 #include <pthread.h>
 #include <signal.h>
 #include <sys/resource.h>
@@ -39,12 +35,16 @@
 #include <sstream>
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 
 #include "arch/context-inl.h"
 #include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/atomic.h"
 #include "base/bit_utils.h"
+#include "base/casts.h"
+#include "arch/context.h"
 #include "base/file_utils.h"
 #include "base/memory_tool.h"
 #include "base/mutex.h"
@@ -70,13 +70,16 @@
 #include "gc_root.h"
 #include "handle_scope-inl.h"
 #include "indirect_reference_table-inl.h"
+#include "instrumentation.h"
 #include "interpreter/interpreter.h"
+#include "interpreter/mterp/mterp.h"
 #include "interpreter/shadow_frame-inl.h"
 #include "java_frame_root_info.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
-#include "mirror/class-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class_loader.h"
+#include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/stack_trace_element.h"
 #include "monitor.h"
@@ -88,10 +91,12 @@
 #include "oat_quick_method_header.h"
 #include "obj_ptr-inl.h"
 #include "object_lock.h"
+#include "palette/palette.h"
 #include "quick/quick_method_frame_info.h"
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
@@ -149,11 +154,12 @@
 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
   CHECK(kUseReadBarrier);
   tls32_.is_gc_marking = is_marking;
-  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
+  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
   ResetQuickAllocEntryPointsForThread(is_marking);
 }
 
 void Thread::InitTlsEntryPoints() {
+  ScopedTrace trace("InitTlsEntryPoints");
   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
   uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
   uintptr_t* end = reinterpret_cast<uintptr_t*>(
@@ -281,6 +287,120 @@
       << "No deoptimization context for thread " << *this;
 }
 
+enum {
+  kPermitAvailable = 0,  // Incrementing consumes the permit
+  kNoPermit = 1,  // Incrementing marks as waiter waiting
+  kNoPermitWaiterWaiting = 2
+};
+
+void Thread::Park(bool is_absolute, int64_t time) {
+  DCHECK(this == Thread::Current());
+#if ART_USE_FUTEXES
+  // Consume the permit, or mark as waiting. This cannot cause park_state to go
+  // outside of its valid range (0, 1, 2), because in all cases where 2 is
+  // assigned it is set back to 1 before returning, and this method cannot run
+  // concurrently with itself since it operates on the current thread.
+  int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
+  if (old_state == kNoPermit) {
+    // no permit was available. block thread until later.
+    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
+    int result = 0;
+    bool timed_out = false;
+    if (!is_absolute && time == 0) {
+      // Thread.getState() is documented to return waiting for untimed parks.
+      ScopedThreadSuspension sts(this, ThreadState::kWaiting);
+      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+      result = futex(tls32_.park_state_.Address(),
+                     FUTEX_WAIT_PRIVATE,
+                     /* sleep if val = */ kNoPermitWaiterWaiting,
+                     /* timeout */ nullptr,
+                     nullptr,
+                     0);
+    } else if (time > 0) {
+      // Only actually suspend and futex_wait if we're going to wait for some
+      // positive amount of time - the kernel will reject negative times with
+      // EINVAL, and a zero time will just noop.
+
+      // Thread.getState() is documented to return timed wait for timed parks.
+      ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
+      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+      timespec timespec;
+      if (is_absolute) {
+        // Time is millis when scheduled for an absolute time
+        timespec.tv_nsec = (time % 1000) * 1000000;
+        timespec.tv_sec = time / 1000;
+        // This odd looking pattern is recommended by futex documentation to
+        // wait until an absolute deadline, with otherwise identical behavior to
+        // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
+        // correct time when the system clock changes.
+        result = futex(tls32_.park_state_.Address(),
+                       FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
+                       /* sleep if val = */ kNoPermitWaiterWaiting,
+                       &timespec,
+                       nullptr,
+                       FUTEX_BITSET_MATCH_ANY);
+      } else {
+        // Time is nanos when scheduled for a relative time
+        timespec.tv_sec = time / 1000000000;
+        timespec.tv_nsec = time % 1000000000;
+        result = futex(tls32_.park_state_.Address(),
+                       FUTEX_WAIT_PRIVATE,
+                       /* sleep if val = */ kNoPermitWaiterWaiting,
+                       &timespec,
+                       nullptr,
+                       0);
+      }
+    }
+    if (result == -1) {
+      switch (errno) {
+        case ETIMEDOUT:
+          timed_out = true;
+          FALLTHROUGH_INTENDED;
+        case EAGAIN:
+        case EINTR: break;  // park() is allowed to spuriously return
+        default: PLOG(FATAL) << "Failed to park";
+      }
+    }
+    // Mark as no longer waiting, and consume permit if there is one.
+    tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
+    // TODO: Call to signal jvmti here
+    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
+  } else {
+    // the fetch_add has consumed the permit. immediately return.
+    DCHECK_EQ(old_state, kPermitAvailable);
+  }
+#else
+  #pragma clang diagnostic push
+  #pragma clang diagnostic warning "-W#warnings"
+  #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
+  #pragma clang diagnostic pop
+  UNUSED(is_absolute, time);
+  UNIMPLEMENTED(WARNING);
+  sched_yield();
+#endif
+}
+
+void Thread::Unpark() {
+#if ART_USE_FUTEXES
+  // Set permit available; will be consumed either by fetch_add (when the thread
+  // tries to park) or store (when the parked thread is woken up)
+  if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
+      == kNoPermitWaiterWaiting) {
+    int result = futex(tls32_.park_state_.Address(),
+                       FUTEX_WAKE_PRIVATE,
+                       /* number of waiters = */ 1,
+                       nullptr,
+                       nullptr,
+                       0);
+    if (result == -1) {
+      PLOG(FATAL) << "Failed to unpark";
+    }
+  }
+#else
+  UNIMPLEMENTED(WARNING);
+#endif
+}
+
 void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
   StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
       sf, type, tlsPtr_.stacked_shadow_frame_record);
@@ -485,6 +605,22 @@
 
     runtime->GetRuntimeCallbacks()->ThreadStart(self);
 
+    // Unpark ourselves if the java peer was unparked before it started (see
+    // b/28845097#comment49 for more information)
+
+    ArtField* unparkedField = jni::DecodeArtField(
+        WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+    bool should_unpark = false;
+    {
+      // Hold the lock here, so that if another thread calls unpark before the thread starts
+      // we don't observe the unparkedBeforeStart field before the unparker writes to it,
+      // which could cause a lost unpark.
+      art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+      should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
+    }
+    if (should_unpark) {
+      self->Unpark();
+    }
     // Invoke the 'run' method of our java.lang.Thread.
     ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
     jmethodID mid = WellKnownClasses::java_lang_Thread_run;
@@ -500,7 +636,7 @@
 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                   ObjPtr<mirror::Object> thread_peer) {
   ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
-  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
+  Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
   // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
   // to stop it from going away.
   if (kIsDebugBuild) {
@@ -576,7 +712,7 @@
   VLOG(threads) << "installing stack protected region at " << std::hex <<
         static_cast<void*>(pregion) << " to " <<
         static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
-  if (ProtectStack(/* fatal_on_error */ false)) {
+  if (ProtectStack(/* fatal_on_error= */ false)) {
     // Tell the kernel that we won't be needing these pages any more.
     // NB. madvise will probably write zeroes into the memory (on linux it does).
     uint32_t unwanted_size = stack_top - pregion - kPageSize;
@@ -645,7 +781,7 @@
       static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
 
   // Protect the bottom of the stack to prevent read/write to it.
-  ProtectStack(/* fatal_on_error */ true);
+  ProtectStack(/* fatal_on_error= */ true);
 
   // Tell the kernel that we won't be needing these pages any more.
   // NB. madvise will probably write zeroes into the memory (on linux it does).
@@ -728,7 +864,7 @@
       // JNIEnvExt we created.
       // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
       //       between the threads.
-      child_jni_env_ext.release();
+      child_jni_env_ext.release();  // NOLINT pthreads API.
       return;
     }
   }
@@ -767,6 +903,8 @@
   tlsPtr_.pthread_self = pthread_self();
   CHECK(is_started_);
 
+  ScopedTrace trace("Thread::Init");
+
   SetUpAlternateSignalStack();
   if (!InitStackHwm()) {
     return false;
@@ -776,7 +914,10 @@
   RemoveSuspendTrigger();
   InitCardTable();
   InitTid();
-  interpreter::InitInterpreterTls(this);
+  {
+    ScopedTrace trace2("InitInterpreterTls");
+    interpreter::InitInterpreterTls(this);
+  }
 
 #ifdef ART_TARGET_ANDROID
   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
@@ -800,6 +941,7 @@
     }
   }
 
+  ScopedTrace trace3("ThreadList::Register");
   thread_list->Register(this);
   return true;
 }
@@ -807,6 +949,7 @@
 template <typename PeerAction>
 Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
   Runtime* runtime = Runtime::Current();
+  ScopedTrace trace("Thread::Attach");
   if (runtime == nullptr) {
     LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
         ((thread_name != nullptr) ? thread_name : "(Unnamed)");
@@ -814,6 +957,7 @@
   }
   Thread* self;
   {
+    ScopedTrace trace2("Thread birth");
     MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
     if (runtime->IsShuttingDownLocked()) {
       LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
@@ -907,7 +1051,7 @@
     }
     self->GetJniEnv()->SetLongField(thread_peer,
                                     WellKnownClasses::java_lang_Thread_nativePeer,
-                                    reinterpret_cast<jlong>(self));
+                                    reinterpret_cast64<jlong>(self));
     return true;
   };
   return Attach(thread_name, as_daemon, set_peer_action);
@@ -949,8 +1093,9 @@
 
   Thread* self = this;
   DCHECK_EQ(self, Thread::Current());
-  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
-                    reinterpret_cast<jlong>(self));
+  env->SetLongField(peer.get(),
+                    WellKnownClasses::java_lang_Thread_nativePeer,
+                    reinterpret_cast64<jlong>(self));
 
   ScopedObjectAccess soa(self);
   StackHandleScope<1> hs(self);
@@ -1114,6 +1259,7 @@
 }
 
 bool Thread::InitStackHwm() {
+  ScopedTrace trace("InitStackHwm");
   void* read_stack_base;
   size_t read_stack_size;
   size_t read_guard_size;
@@ -1236,34 +1382,6 @@
   LOG(FATAL) << ss.str();
 }
 
-void Thread::SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code) {
-  CHECK_EQ(this, Thread::Current()) << "This function may only be called on the current thread. "
-                                    << *Thread::Current() << " tried to modify the suspendability "
-                                    << "of " << *this;
-  // NB This checks the new value! This ensures that we can only set can_be_suspended_by_user_code
-  // to false if !CanCallIntoJava().
-  DCHECK(!CanCallIntoJava() || can_be_suspended_by_user_code)
-      << "Threads able to call into java may not be marked as unsuspendable!";
-  if (can_be_suspended_by_user_code == CanBeSuspendedByUserCode()) {
-    // Don't need to do anything if nothing is changing.
-    return;
-  }
-  art::MutexLock mu(this, *Locks::user_code_suspension_lock_);
-  art::MutexLock thread_list_mu(this, *Locks::thread_suspend_count_lock_);
-
-  // We want to add the user-code suspend count if we are newly allowing user-code suspends and
-  // remove them if we are disabling them.
-  int adj = can_be_suspended_by_user_code ? GetUserCodeSuspendCount() : -GetUserCodeSuspendCount();
-  // Adjust the global suspend count appropriately. Use kInternal to not change the ForUserCode
-  // count.
-  if (adj != 0) {
-    bool suspend = ModifySuspendCountInternal(this, adj, nullptr, SuspendReason::kInternal);
-    CHECK(suspend) << this << " was unable to modify it's own suspend count!";
-  }
-  // Mark thread as accepting user-code suspensions.
-  can_be_suspended_by_user_code_ = can_be_suspended_by_user_code;
-}
-
 bool Thread::ModifySuspendCountInternal(Thread* self,
                                         int delta,
                                         AtomicInteger* suspend_barrier,
@@ -1285,17 +1403,6 @@
       LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
       return false;
     }
-    DCHECK(this == self || this->IsSuspended())
-        << "Only self kForUserCode suspension on an unsuspended thread is allowed: " << this;
-    if (UNLIKELY(!CanBeSuspendedByUserCode())) {
-      VLOG(threads) << this << " is being requested to suspend for user code but that is disabled "
-                    << "the thread will not actually go to sleep.";
-      // Having the user_code_suspend_count still be around is useful but we don't need to actually
-      // do anything since we aren't going to 'really' suspend. Just adjust the
-      // user_code_suspend_count and return.
-      tls32_.user_code_suspend_count += delta;
-      return true;
-    }
   }
   if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
     UnsafeLogFatalForSuspendCount(self, this);
@@ -1385,7 +1492,7 @@
         done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
 #if ART_USE_FUTEXES
         if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
-          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+          futex(pending_threads->Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
         }
 #endif
       } while (!done);
@@ -1778,8 +1885,9 @@
 
   // Grab the scheduler stats for this thread.
   std::string scheduler_stats;
-  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
-    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
+  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)
+      && !scheduler_stats.empty()) {
+    scheduler_stats = android::base::Trim(scheduler_stats);  // Lose the trailing '\n'.
   } else {
     scheduler_stats = "0 0 0";
   }
@@ -1855,8 +1963,7 @@
       override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-    ObjPtr<mirror::Class> c = m->GetDeclaringClass();
-    ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache();
+    ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache();
     int line_number = -1;
     if (dex_cache != nullptr) {  // be tolerant of bad input
       const DexFile* dex_file = dex_cache->GetDexFile();
@@ -2049,13 +2156,13 @@
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method =
           GetCurrentMethod(nullptr,
-                           /*check_suspended*/ !force_dump_stack,
-                           /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
+                           /*check_suspended=*/ !force_dump_stack,
+                           /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
       DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
     DumpJavaStack(os,
-                  /*check_suspended*/ !force_dump_stack,
-                  /*dump_locks*/ !force_dump_stack);
+                  /*check_suspended=*/ !force_dump_stack,
+                  /*dump_locks=*/ !force_dump_stack);
   } else {
     os << "Not able to dump stack of thread that isn't suspended";
   }
@@ -2156,9 +2263,8 @@
 Thread::Thread(bool daemon)
     : tls32_(daemon),
       wait_monitor_(nullptr),
-      can_call_into_java_(true),
-      can_be_suspended_by_user_code_(true) {
-  wait_mutex_ = new Mutex("a thread wait mutex");
+      is_runtime_thread_(false) {
+  wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
   tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
   tlsPtr_.name = new std::string(kThreadNameDuringStartup);
@@ -2168,6 +2274,9 @@
   tls32_.state_and_flags.as_struct.flags = 0;
   tls32_.state_and_flags.as_struct.state = kNative;
   tls32_.interrupted.store(false, std::memory_order_relaxed);
+  // Initialize with no permit; if the java Thread was unparked before being
+  // started, it will unpark itself before calling into java code.
+  tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
   memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
   std::fill(tlsPtr_.rosalloc_runs,
             tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2179,6 +2288,15 @@
   tlsPtr_.flip_function = nullptr;
   tlsPtr_.thread_local_mark_stack = nullptr;
   tls32_.is_transitioning_to_runnable = false;
+  tls32_.use_mterp = false;
+}
+
+void Thread::NotifyInTheadList() {
+  tls32_.use_mterp = interpreter::CanUseMterp();
+}
+
+bool Thread::CanLoadClasses() const {
+  return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
 }
 
 bool Thread::IsStillStarting() const {
@@ -2475,12 +2593,15 @@
 }
 
 void Thread::Interrupt(Thread* self) {
-  MutexLock mu(self, *wait_mutex_);
-  if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
-    return;
+  {
+    MutexLock mu(self, *wait_mutex_);
+    if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
+      return;
+    }
+    tls32_.interrupted.store(true, std::memory_order_seq_cst);
+    NotifyLocked(self);
   }
-  tls32_.interrupted.store(true, std::memory_order_seq_cst);
-  NotifyLocked(self);
+  Unpark();
 }
 
 void Thread::Notify() {
@@ -2515,7 +2636,7 @@
         saved_frames_(saved_frames),
         max_saved_frames_(max_saved_frames) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     // We want to skip frames up to and including the exception's constructor.
     // Note we also skip the frame if it doesn't have a method (namely the callee
     // save frame)
@@ -2603,7 +2724,7 @@
     self_->EndAssertNoThreadSuspension(nullptr);
   }
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (trace_ == nullptr) {
       return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
     }
@@ -2938,8 +3059,8 @@
   // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 .
   class_linker->EnsureInitialized(soa.Self(),
                                   h_aste_class,
-                                  /* can_init_fields */ true,
-                                  /* can_init_parents */ true);
+                                  /* can_init_fields= */ true,
+                                  /* can_init_parents= */ true);
   if (soa.Self()->IsExceptionPending()) {
     // This should not fail in a healthy runtime.
     return nullptr;
@@ -3147,8 +3268,10 @@
 }
 
 void Thread::ThrowOutOfMemoryError(const char* msg) {
-  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
-      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
+  LOG(WARNING) << "Throwing OutOfMemoryError "
+               << '"' << msg << '"'
+               << " (VmSize " << GetProcessStatus("VmSize")
+               << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")");
   if (!tls32_.throwing_OutOfMemoryError) {
     tls32_.throwing_OutOfMemoryError = true;
     ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
@@ -3402,11 +3525,41 @@
     HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
     instrumentation->ExceptionThrownEvent(this, exception.Ptr());
   }
-  // Does instrumentation need to deoptimize the stack?
-  // Note: we do this *after* reporting the exception to instrumentation in case it
-  // now requires deoptimization. It may happen if a debugger is attached and requests
-  // new events (single-step, breakpoint, ...) when the exception is reported.
-  if (Dbg::IsForcedInterpreterNeededForException(this)) {
+  // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
+  // Note: we do this *after* reporting the exception to instrumentation in case it now requires
+  // deoptimization. It may happen if a debugger is attached and requests new events (single-step,
+  // breakpoint, ...) when the exception is reported.
+  //
+  // Note we need to check for both force_frame_pop and force_retry_instruction. The first is
+  // expected to happen fairly regularly but the second can only happen if we are using
+  // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later
+  // and see every frame being popped. We don't need to handle it any differently.
+  ShadowFrame* cf;
+  bool force_deopt;
+  {
+    NthCallerVisitor visitor(this, 0, false);
+    visitor.WalkStack();
+    cf = visitor.GetCurrentShadowFrame();
+    if (cf == nullptr) {
+      cf = FindDebuggerShadowFrame(visitor.GetFrameId());
+    }
+    bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame();
+    bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction();
+    if (kIsDebugBuild && force_frame_pop) {
+      NthCallerVisitor penultimate_visitor(this, 1, false);
+      penultimate_visitor.WalkStack();
+      ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame();
+      if (penultimate_frame == nullptr) {
+        penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId());
+      }
+      DCHECK(penultimate_frame != nullptr &&
+             penultimate_frame->GetForceRetryInstruction())
+          << "Force pop frame without retry instruction found. penultimate frame is null: "
+          << (penultimate_frame == nullptr ? "true" : "false");
+    }
+    force_deopt = force_frame_pop || force_retry_instr;
+  }
+  if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt) {
     NthCallerVisitor visitor(this, 0, false);
     visitor.WalkStack();
     if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
@@ -3414,11 +3567,19 @@
       const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
       // Save the exception into the deoptimization context so it can be restored
       // before entering the interpreter.
+      if (force_deopt) {
+        VLOG(deopt) << "Deopting " << cf->GetMethod()->PrettyMethod() << " for frame-pop";
+        DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+        // Get rid of the exception since we are doing a framepop instead.
+        LOG(WARNING) << "Suppressing pending exception for retry-instruction/frame-pop: "
+                     << exception->Dump();
+        ClearException();
+      }
       PushDeoptimizationContext(
           JValue(),
-          false /* is_reference */,
-          exception,
-          false /* from_code */,
+          /* is_reference= */ false,
+          (force_deopt ? nullptr : exception),
+          /* from_code= */ false,
           method_type);
       artDeoptimize(this);
       UNREACHABLE();
@@ -3456,50 +3617,34 @@
   return result;
 }
 
-// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
-//       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor final : public StackVisitor {
-  CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread,
-                     context,
-                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
-                     check_suspended),
-        this_object_(nullptr),
-        method_(nullptr),
-        dex_pc_(0),
-        abort_on_error_(abort_on_error) {}
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    if (m->IsRuntimeMethod()) {
-      // Continue if this is a runtime method.
-      return true;
-    }
-    if (context_ != nullptr) {
-      this_object_ = GetThisObject();
-    }
-    method_ = m;
-    dex_pc_ = GetDexPc(abort_on_error_);
-    return false;
-  }
-  ObjPtr<mirror::Object> this_object_;
-  ArtMethod* method_;
-  uint32_t dex_pc_;
-  const bool abort_on_error_;
-};
-
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
                                     bool check_suspended,
                                     bool abort_on_error) const {
-  CurrentMethodVisitor visitor(const_cast<Thread*>(this),
-                               nullptr,
-                               check_suspended,
-                               abort_on_error);
-  visitor.WalkStack(false);
-  if (dex_pc != nullptr) {
-    *dex_pc = visitor.dex_pc_;
+  // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
+  //       so we don't abort in a special situation (thinlocked monitor) when dumping the Java
+  //       stack.
+  ArtMethod* method = nullptr;
+  uint32_t dex_pc = dex::kDexNoIndex;
+  StackVisitor::WalkStack(
+      [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = visitor->GetMethod();
+        if (m->IsRuntimeMethod()) {
+          // Continue if this is a runtime method.
+          return true;
+        }
+        method = m;
+        dex_pc = visitor->GetDexPc(abort_on_error);
+        return false;
+      },
+      const_cast<Thread*>(this),
+      /* context= */ nullptr,
+      StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+      check_suspended);
+
+  if (dex_pc_out != nullptr) {
+    *dex_pc_out = dex_pc;
   }
-  return visitor.method_;
+  return method;
 }
 
 bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
@@ -3520,7 +3665,7 @@
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
         visitor_(visitor) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (false) {
       LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
                 << StringPrintf("@ PC:%04x", GetDexPc());
@@ -3552,7 +3697,7 @@
       }
     }
     // Mark lock count map required for structured locking checks.
-    shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg */ -1, this);
+    shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
   }
 
  private:
@@ -3568,7 +3713,7 @@
       if (kVerifyImageObjectsMarked) {
         gc::Heap* const heap = Runtime::Current()->GetHeap();
         gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
-                                                                                /*fail_ok*/true);
+                                                                                /*fail_ok=*/true);
         if (space != nullptr && space->IsImageSpace()) {
           bool failed = false;
           if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
@@ -3590,7 +3735,7 @@
         }
       }
       mirror::Object* new_ref = klass.Ptr();
-      visitor_(&new_ref, /* vreg */ -1, this);
+      visitor_(&new_ref, /* vreg= */ -1, this);
       if (new_ref != klass) {
         method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
       }
@@ -3609,8 +3754,8 @@
     if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
       DCHECK(method_header->IsOptimized());
-      StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
-          reinterpret_cast<uintptr_t>(cur_quick_frame));
+      StackReference<mirror::Object>* vreg_base =
+          reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
       uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
       CodeInfo code_info(method_header, kPrecise
           ? CodeInfo::DecodeFlags::AllTables  // We will need dex register maps.
@@ -3663,7 +3808,7 @@
         mirror::Object* ref = ref_addr->AsMirrorPtr();
         if (ref != nullptr) {
           mirror::Object* new_ref = ref;
-          visitor_(&new_ref, /* vreg */ -1, this);
+          visitor_(&new_ref, /* vreg= */ -1, this);
           if (ref != new_ref) {
             ref_addr->Assign(new_ref);
           }
@@ -3856,9 +4001,9 @@
 
 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
   if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
-    VisitRoots</* kPrecise */ true>(visitor);
+    VisitRoots</* kPrecise= */ true>(visitor);
   } else {
-    VisitRoots</* kPrecise */ false>(visitor);
+    VisitRoots</* kPrecise= */ false>(visitor);
   }
 }
 
@@ -4073,7 +4218,44 @@
 
 void Thread::SetReadBarrierEntrypoints() {
   // Make sure entrypoints aren't null.
-  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
+  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true);
+}
+
+void Thread::ClearAllInterpreterCaches() {
+  static struct ClearInterpreterCacheClosure : Closure {
+    virtual void Run(Thread* thread) {
+      thread->GetInterpreterCache()->Clear(thread);
+    }
+  } closure;
+  Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+}
+
+
+void Thread::ReleaseLongJumpContextInternal() {
+  // Each QuickExceptionHandler gets a long jump context and uses
+  // it for doing the long jump, after finding catch blocks/doing deoptimization.
+  // Both finding catch blocks and deoptimization can trigger another
+  // exception such as a result of class loading. So there can be nested
+  // cases of exception handling and multiple contexts being used.
+  // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
+  // for reuse so there is no need to always allocate a new one each time when
+  // getting a context. Since we only keep one context for reuse, delete the
+  // existing one since the passed in context is yet to be used for longjump.
+  delete tlsPtr_.long_jump_context;
+}
+
+void Thread::SetNativePriority(int new_priority) {
+  // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread.
+  PaletteStatus status = PaletteSchedSetPriority(0, new_priority);
+  CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno);
+}
+
+int Thread::GetNativePriority() {
+  int priority = 0;
+  // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread.
+  PaletteStatus status = PaletteSchedGetPriority(0, &priority);
+  CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno);
+  return priority;
 }
 
 }  // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index d169a62..7a14fd7 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -17,8 +17,7 @@
 #ifndef ART_RUNTIME_THREAD_H_
 #define ART_RUNTIME_THREAD_H_
 
-#include <setjmp.h>
-
+#include <atomic>
 #include <bitset>
 #include <deque>
 #include <iosfwd>
@@ -26,22 +25,21 @@
 #include <memory>
 #include <string>
 
-#include "arch/context.h"
-#include "arch/instruction_set.h"
 #include "base/atomic.h"
 #include "base/enums.h"
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/safe_map.h"
+#include "base/value_object.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "handle_scope.h"
-#include "instrumentation.h"
+#include "interpreter/interpreter_cache.h"
 #include "jvalue.h"
 #include "managed_stack.h"
 #include "offsets.h"
 #include "read_barrier_config.h"
+#include "runtime_globals.h"
 #include "runtime_stats.h"
 #include "suspend_reason.h"
 #include "thread_state.h"
@@ -59,6 +57,10 @@
 }  // namespace collector
 }  // namespace gc
 
+namespace instrumentation {
+struct InstrumentationStackFrame;
+}  // namespace instrumentation
+
 namespace mirror {
 class Array;
 class Class;
@@ -468,16 +470,7 @@
   Context* GetLongJumpContext();
   void ReleaseLongJumpContext(Context* context) {
     if (tlsPtr_.long_jump_context != nullptr) {
-      // Each QuickExceptionHandler gets a long jump context and uses
-      // it for doing the long jump, after finding catch blocks/doing deoptimization.
-      // Both finding catch blocks and deoptimization can trigger another
-      // exception such as a result of class loading. So there can be nested
-      // cases of exception handling and multiple contexts being used.
-      // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
-      // for reuse so there is no need to always allocate a new one each time when
-      // getting a context. Since we only keep one context for reuse, delete the
-      // existing one since the passed in context is yet to be used for longjump.
-      delete tlsPtr_.long_jump_context;
+      ReleaseLongJumpContextInternal();
     }
     tlsPtr_.long_jump_context = context;
   }
@@ -563,11 +556,11 @@
   bool Interrupted();
   // Implements java.lang.Thread.isInterrupted.
   bool IsInterrupted();
-  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
+  void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
   void SetInterrupted(bool i) {
     tls32_.interrupted.store(i, std::memory_order_seq_cst);
   }
-  void Notify() REQUIRES(!*wait_mutex_);
+  void Notify() REQUIRES(!wait_mutex_);
 
   ALWAYS_INLINE void PoisonObjectPointers() {
     ++poison_object_cookie_;
@@ -579,6 +572,11 @@
     return poison_object_cookie_;
   }
 
+  // Parking for 0ns of relative time means an untimed park, negative (though
+  // should be handled in java code) returns immediately
+  void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
+  void Unpark();
+
  private:
   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
 
@@ -650,28 +648,35 @@
   //
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThinLockIdOffset() {
+  static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> InterruptedOffset() {
+  static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> IsGcMarkingOffset() {
+  static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
+    return ThreadOffset<pointer_size>(
+        OFFSETOF_MEMBER(Thread, tls32_) +
+        OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
+  }
+
+  template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
@@ -686,21 +691,12 @@
 
  private:
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
+  static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
-    size_t scale;
-    size_t shrink;
-    if (pointer_size == kRuntimePointerSize) {
-      scale = 1;
-      shrink = 1;
-    } else if (pointer_size > kRuntimePointerSize) {
-      scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
-      shrink = 1;
-    } else {
-      DCHECK_GT(kRuntimePointerSize, pointer_size);
-      scale = 1;
-      shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
-    }
+    size_t scale = (pointer_size > kRuntimePointerSize) ?
+      static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
+    size_t shrink = (kRuntimePointerSize > pointer_size) ?
+      static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
   }
 
@@ -740,82 +736,70 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> SelfOffset() {
+  static constexpr ThreadOffset<pointer_size> SelfOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+  static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
-    return ThreadOffsetFromTlsPtr<pointer_size>(
-        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
-  }
-
-  template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
-    return ThreadOffsetFromTlsPtr<pointer_size>(
-        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
-  }
-
-  template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ExceptionOffset() {
+  static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> PeerOffset() {
+  static constexpr ThreadOffset<pointer_size> PeerOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
   }
 
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> CardTableOffset() {
+  static constexpr ThreadOffset<pointer_size> CardTableOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_pos));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_end));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_objects));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+  static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 rosalloc_runs));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_alloc_stack_top));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_alloc_stack_end));
   }
@@ -825,19 +809,7 @@
     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
   }
 
-  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
-    uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
-                                            ? GetStackOverflowReservedBytes(kRuntimeISA)
-                                            : 0);
-    if (kIsDebugBuild) {
-      // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
-      // potentially humongous stack size. We don't want to take too much of the stack regularly,
-      // so do not increase the regular reserved size (for compiled code etc) and only report the
-      // virtually smaller stack to the interpreter here.
-      end += GetStackOverflowReservedBytes(kRuntimeISA);
-    }
-    return end;
-  }
+  ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
 
   uint8_t* GetStackEnd() const {
     return tlsPtr_.stack_end;
@@ -847,30 +819,26 @@
   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Set the stack end to that to be used during regular execution
-  void ResetDefaultStackEnd() {
-    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
-    // to throw a StackOverflowError.
-    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
-  }
+  ALWAYS_INLINE void ResetDefaultStackEnd();
 
   bool IsHandlingStackOverflow() const {
     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> StackEndOffset() {
+  static constexpr ThreadOffset<pointer_size> StackEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> JniEnvOffset() {
+  static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
+  static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
         ManagedStack::TaggedTopQuickFrameOffset());
@@ -892,7 +860,7 @@
   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
+  static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
         ManagedStack::TopShadowFrameOffset());
@@ -921,7 +889,7 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+  static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 top_handle_scope));
   }
@@ -989,25 +957,17 @@
     --tls32_.disable_thread_flip_count;
   }
 
-  // Returns true if the thread is subject to user_code_suspensions.
-  bool CanBeSuspendedByUserCode() const {
-    return can_be_suspended_by_user_code_;
+  // Returns true if the thread is a runtime thread (eg from a ThreadPool).
+  bool IsRuntimeThread() const {
+    return is_runtime_thread_;
   }
 
-  // Sets CanBeSuspenededByUserCode and adjusts the suspend-count as needed. This may only be called
-  // when running on the current thread. It is **absolutely required** that this be called only on
-  // the Thread::Current() thread.
-  void SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code)
-      REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::user_code_suspension_lock_);
-
-  // Returns true if the thread is allowed to call into java.
-  bool CanCallIntoJava() const {
-    return can_call_into_java_;
+  void SetIsRuntimeThread(bool is_runtime_thread) {
+    is_runtime_thread_ = is_runtime_thread;
   }
 
-  void SetCanCallIntoJava(bool can_call_into_java) {
-    can_call_into_java_ = can_call_into_java;
-  }
+  // Returns true if the thread is allowed to load java classes.
+  bool CanLoadClasses() const;
 
   // Activates single step control for debugging. The thread takes the
   // ownership of the given SingleStepControl*. It is deleted by a call
@@ -1141,6 +1101,10 @@
     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
   }
 
+  bool UseMterp() const {
+    return tls32_.use_mterp.load();
+  }
+
   void ResetQuickAllocEntryPointsForThread(bool is_marking);
 
   // Returns the remaining space in the TLAB.
@@ -1215,30 +1179,14 @@
   bool ProtectStack(bool fatal_on_error = true);
   bool UnprotectStack();
 
-  void SetMterpDefaultIBase(void* ibase) {
-    tlsPtr_.mterp_default_ibase = ibase;
-  }
-
   void SetMterpCurrentIBase(void* ibase) {
     tlsPtr_.mterp_current_ibase = ibase;
   }
 
-  void SetMterpAltIBase(void* ibase) {
-    tlsPtr_.mterp_alt_ibase = ibase;
-  }
-
-  const void* GetMterpDefaultIBase() const {
-    return tlsPtr_.mterp_default_ibase;
-  }
-
   const void* GetMterpCurrentIBase() const {
     return tlsPtr_.mterp_current_ibase;
   }
 
-  const void* GetMterpAltIBase() const {
-    return tlsPtr_.mterp_alt_ibase;
-  }
-
   bool HandlingSignal() const {
     return tls32_.handling_signal_;
   }
@@ -1299,11 +1247,37 @@
                                        jobject thread_group)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
+    return &interpreter_cache_;
+  }
+
+  // Clear all thread-local interpreter caches.
+  //
+  // Since the caches are keyed by memory pointer to dex instructions, this must be
+  // called when any dex code is unloaded (before different code gets loaded at the
+  // same memory location).
+  //
+  // If presence of cache entry implies some pre-conditions, this must also be
+  // called if the pre-conditions might no longer hold true.
+  static void ClearAllInterpreterCaches();
+
+  template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
+    return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
+  }
+
+  static constexpr int InterpreterCacheSizeLog2() {
+    return WhichPowerOf2(InterpreterCache::kSize);
+  }
+
  private:
   explicit Thread(bool daemon);
   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
   void Destroy();
 
+  void NotifyInTheadList()
+      REQUIRES_SHARED(Locks::thread_list_lock_);
+
   // Attaches the calling native thread to the runtime, returning the new native peer.
   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
   template <typename PeerAction>
@@ -1425,6 +1399,8 @@
 
   static bool IsAotCompiler();
 
+  void ReleaseLongJumpContextInternal();
+
   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows and atomic CAS to
   // change from being Suspended to Runnable without a suspend request occurring.
   union PACKED(4) StateAndFlags {
@@ -1549,6 +1525,8 @@
     // Thread "interrupted" status; stays raised until queried or thrown.
     Atomic<bool32_t> interrupted;
 
+    AtomicInteger park_state_;
+
     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
     // processing of the CC collector only. This is thread local so that we can enable/disable weak
@@ -1563,12 +1541,15 @@
     // critical section enter.
     uint32_t disable_thread_flip_count;
 
-    // If CanBeSuspendedByUserCode, how much of 'suspend_count_' is by request of user code, used to
-    // distinguish threads suspended by the runtime from those suspended by user code. Otherwise
-    // this is just a count of how many user-code suspends have been attempted (but were ignored).
+    // How much of 'suspend_count_' is by request of user code, used to distinguish threads
+    // suspended by the runtime from those suspended by user code.
     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
     // told that AssertHeld should be good enough.
     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
+
+    // True if everything is in the ideal state for fast interpretation.
+    // False if we need to switch to the C++ interpreter to handle special cases.
+    std::atomic<bool32_t> use_mterp;
   } tls32_;
 
   struct PACKED(8) tls_64bit_sized_values {
@@ -1593,8 +1574,7 @@
       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
       thread_local_limit(nullptr),
-      thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
-      mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
+      thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
       thread_local_alloc_stack_end(nullptr),
       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
       async_exception(nullptr) {
@@ -1731,10 +1711,8 @@
     JniEntryPoints jni_entrypoints;
     QuickEntryPoints quick_entrypoints;
 
-    // Mterp jump table bases.
+    // Mterp jump table base.
     void* mterp_current_ibase;
-    void* mterp_default_ibase;
-    void* mterp_alt_ibase;
 
     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
@@ -1759,6 +1737,14 @@
     mirror::Throwable* async_exception;
   } tlsPtr_;
 
+  // Small thread-local cache to be used from the interpreter.
+  // It is keyed by dex instruction pointer.
+  // The value is opcode-dependent (e.g. field offset).
+  InterpreterCache interpreter_cache_;
+
+  // All fields below this line should not be accessed by native code. This means these fields can
+  // be modified, rearranged, added or removed without having to modify asm_support.h
+
   // Guards the 'wait_monitor_' members.
   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
@@ -1780,13 +1766,8 @@
   // compiled code or entrypoints.
   SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
 
-  // True if the thread is allowed to call back into java (for e.g. during class resolution).
-  // By default this is true.
-  bool can_call_into_java_;
-
-  // True if the thread is subject to user-code suspension. By default this is true. This can only
-  // be false for threads where '!can_call_into_java_'.
-  bool can_be_suspended_by_user_code_;
+  // True if the thread is some form of runtime thread (e.g. GC or JIT).
+  bool is_runtime_thread_;
 
   friend class Dbg;  // For SetStateUnsafe.
   friend class gc::collector::SemiSpace;  // For getting stack traces.
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 8ff6c52..f333400 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -16,84 +16,8 @@
 
 #include "thread.h"
 
-#include <errno.h>
-#include <limits.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-
-#include <cutils/sched_policy.h>
-#include <utils/threads.h>
-
-#include "base/macros.h"
-
 namespace art {
 
-// Conversion map for "nice" values.
-//
-// We use Android thread priority constants to be consistent with the rest
-// of the system.  In some cases adjacent entries may overlap.
-//
-static const int kNiceValues[10] = {
-  ANDROID_PRIORITY_LOWEST,                // 1 (MIN_PRIORITY)
-  ANDROID_PRIORITY_BACKGROUND + 6,
-  ANDROID_PRIORITY_BACKGROUND + 3,
-  ANDROID_PRIORITY_BACKGROUND,
-  ANDROID_PRIORITY_NORMAL,                // 5 (NORM_PRIORITY)
-  ANDROID_PRIORITY_NORMAL - 2,
-  ANDROID_PRIORITY_NORMAL - 4,
-  ANDROID_PRIORITY_URGENT_DISPLAY + 3,
-  ANDROID_PRIORITY_URGENT_DISPLAY + 2,
-  ANDROID_PRIORITY_URGENT_DISPLAY         // 10 (MAX_PRIORITY)
-};
-
-void Thread::SetNativePriority(int newPriority) {
-  if (newPriority < 1 || newPriority > 10) {
-    LOG(WARNING) << "bad priority " << newPriority;
-    newPriority = 5;
-  }
-
-  int newNice = kNiceValues[newPriority-1];
-  pid_t tid = GetTid();
-
-  // TODO: b/18249098 The code below is broken. It uses getpriority() as a proxy for whether a
-  // thread is already in the SP_FOREGROUND cgroup. This is not necessarily true for background
-  // processes, where all threads are in the SP_BACKGROUND cgroup. This means that callers will
-  // have to call setPriority twice to do what they want :
-  //
-  //     Thread.setPriority(Thread.MIN_PRIORITY);  // no-op wrt to cgroups
-  //     Thread.setPriority(Thread.MAX_PRIORITY);  // will actually change cgroups.
-  if (newNice >= ANDROID_PRIORITY_BACKGROUND) {
-    set_sched_policy(tid, SP_BACKGROUND);
-  } else if (getpriority(PRIO_PROCESS, tid) >= ANDROID_PRIORITY_BACKGROUND) {
-    set_sched_policy(tid, SP_FOREGROUND);
-  }
-
-  if (setpriority(PRIO_PROCESS, tid, newNice) != 0) {
-    PLOG(INFO) << *this << " setPriority(PRIO_PROCESS, " << tid << ", " << newNice << ") failed";
-  }
-}
-
-int Thread::GetNativePriority() {
-  errno = 0;
-  int native_priority = getpriority(PRIO_PROCESS, 0);
-  if (native_priority == -1 && errno != 0) {
-    PLOG(WARNING) << "getpriority failed";
-    return kNormThreadPriority;
-  }
-
-  int managed_priority = kMinThreadPriority;
-  for (size_t i = 0; i < arraysize(kNiceValues); i++) {
-    if (native_priority >= kNiceValues[i]) {
-      break;
-    }
-    managed_priority++;
-  }
-  if (managed_priority > kMaxThreadPriority) {
-    managed_priority = kMaxThreadPriority;
-  }
-  return managed_priority;
-}
-
 void Thread::SetUpAlternateSignalStack() {
   // Bionic does this for us.
 }
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index d05fecf..3ed4276 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -23,14 +23,6 @@
 
 namespace art {
 
-void Thread::SetNativePriority(int) {
-  // Do nothing.
-}
-
-int Thread::GetNativePriority() {
-  return kNormThreadPriority;
-}
-
 static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
   if (sigaltstack(new_stack, old_stack) == -1) {
     PLOG(FATAL) << "sigaltstack failed";
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index cddc275..a5406ea 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -438,7 +438,7 @@
   // Wake up the threads blocking for weak ref access so that they will respond to the empty
   // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
   Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
-  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
   {
     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
     uint64_t total_wait_time = 0;
@@ -491,9 +491,9 @@
               // Found a runnable thread that hasn't responded to the empty checkpoint request.
               // Assume it's stuck and safe to dump its stack.
               thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
-                           /*dump_native_stack*/ true,
-                           /*backtrace_map*/ nullptr,
-                           /*force_dump_stack*/ true);
+                           /*dump_native_stack=*/ true,
+                           /*backtrace_map=*/ nullptr,
+                           /*force_dump_stack=*/ true);
             }
           }
         }
@@ -683,7 +683,7 @@
       AssertThreadsAreSuspended(self, self);
     }
   }
-  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());
+  ATraceBegin((std::string("Mutator threads suspended for ") + cause).c_str());
 
   if (self != nullptr) {
     VLOG(threads) << *self << " SuspendAll complete";
@@ -764,16 +764,31 @@
     int32_t cur_val = pending_threads.load(std::memory_order_relaxed);
     if (LIKELY(cur_val > 0)) {
 #if ART_USE_FUTEXES
-      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
-        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
-        if ((errno != EAGAIN) && (errno != EINTR)) {
-          if (errno == ETIMEDOUT) {
-            LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
-                << "Timed out waiting for threads to suspend, waited for "
-                << PrettyDuration(NanoTime() - start_time);
-          } else {
-            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
+      if (futex(pending_threads.Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0)
+          != 0) {
+        if ((errno == EAGAIN) || (errno == EINTR)) {
+          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
+          continue;
+        }
+        if (errno == ETIMEDOUT) {
+          const uint64_t wait_time = NanoTime() - start_time;
+          MutexLock mu(self, *Locks::thread_list_lock_);
+          MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+          std::ostringstream oss;
+          for (const auto& thread : list_) {
+            if (thread == ignore1 || thread == ignore2) {
+              continue;
+            }
+            if (!thread->IsSuspended()) {
+              oss << std::endl << "Thread not suspended: " << *thread;
+            }
           }
+          LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
+              << "Timed out waiting for threads to suspend, waited for "
+              << PrettyDuration(wait_time)
+              << oss.str();
+        } else {
+          PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
         }
       }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
 #else
@@ -796,7 +811,7 @@
     VLOG(threads) << "Thread[null] ResumeAll starting";
   }
 
-  ATRACE_END();
+  ATraceEnd();
 
   ScopedTrace trace("Resuming mutator threads");
 
@@ -840,8 +855,8 @@
 }
 
 bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
-  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
-  ATRACE_END();
+  // This assumes there was an ATraceBegin when we suspended the thread.
+  ATraceEnd();
 
   Thread* self = Thread::Current();
   DCHECK_NE(thread, self);
@@ -902,8 +917,6 @@
                                         bool request_suspension,
                                         SuspendReason reason,
                                         bool* timed_out) {
-  CHECK_NE(reason, SuspendReason::kForUserCode) << "Cannot suspend for user-code by peer. Must be "
-                                                << "done directly on the thread.";
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
   *timed_out = false;
@@ -974,10 +987,10 @@
         // done.
         if (thread->IsSuspended()) {
           VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
-          if (ATRACE_ENABLED()) {
+          if (ATraceEnabled()) {
             std::string name;
             thread->GetThreadName(name);
-            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
+            ATraceBegin(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                       peer).c_str());
           }
           return thread;
@@ -1084,10 +1097,10 @@
         // count, or else we've waited and it has self suspended) or is the current thread, we're
         // done.
         if (thread->IsSuspended()) {
-          if (ATRACE_ENABLED()) {
+          if (ATraceEnabled()) {
             std::string name;
             thread->GetThreadName(name);
-            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
+            ATraceBegin(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                       name.c_str(), thread_id).c_str());
           }
           VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
@@ -1433,6 +1446,7 @@
     }
     self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
   }
+  self->NotifyInTheadList();
 }
 
 void ThreadList::Unregister(Thread* self) {
@@ -1462,24 +1476,26 @@
     // Remove and delete the Thread* while holding the thread_list_lock_ and
     // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
     // Note: deliberately not using MutexLock that could hold a stale self pointer.
-    MutexLock mu(self, *Locks::thread_list_lock_);
-    if (!Contains(self)) {
-      std::string thread_name;
-      self->GetThreadName(thread_name);
-      std::ostringstream os;
-      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
-      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
-      break;
-    } else {
-      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
-      if (!self->IsSuspended()) {
-        list_.remove(self);
+    {
+      MutexLock mu(self, *Locks::thread_list_lock_);
+      if (!Contains(self)) {
+        std::string thread_name;
+        self->GetThreadName(thread_name);
+        std::ostringstream os;
+        DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
+        LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
         break;
+      } else {
+        MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+        if (!self->IsSuspended()) {
+          list_.remove(self);
+          break;
+        }
       }
-      // In the case where we are not suspended yet, sleep to leave other threads time to execute.
-      // This is important if there are realtime threads. b/111277984
-      usleep(1);
     }
+    // In the case where we are not suspended yet, sleep to leave other threads time to execute.
+    // This is important if there are realtime threads. b/111277984
+    usleep(1);
     // We failed to remove the thread due to a suspend request, loop and try again.
   }
   delete self;
@@ -1561,7 +1577,7 @@
     }
   }
   LOG(FATAL) << "Out of internal thread ids";
-  return 0;
+  UNREACHABLE();
 }
 
 void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 28fc59c..e1c756d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -1,3 +1,4 @@
+
 /*
  * Copyright (C) 2012 The Android Open Source Project
  *
@@ -47,10 +48,9 @@
   stack_size += kPageSize;
   std::string error_msg;
   stack_ = MemMap::MapAnonymous(name.c_str(),
-                                /* addr */ nullptr,
                                 stack_size,
                                 PROT_READ | PROT_WRITE,
-                                /* low_4gb */ false,
+                                /*low_4gb=*/ false,
                                 &error_msg);
   CHECK(stack_.IsValid()) << error_msg;
   CHECK_ALIGNED(stack_.Begin(), kPageSize);
@@ -87,7 +87,7 @@
 void ThreadPoolWorker::Run() {
   Thread* self = Thread::Current();
   Task* task = nullptr;
-  thread_pool_->creation_barier_.Wait(self);
+  thread_pool_->creation_barier_.Pass(self);
   while ((task = thread_pool_->GetTask(self)) != nullptr) {
     task->Run(self);
     task->Finalize();
@@ -102,15 +102,10 @@
                                      nullptr,
                                      worker->thread_pool_->create_peers_));
   worker->thread_ = Thread::Current();
-  // Thread pool workers cannot call into java.
-  worker->thread_->SetCanCallIntoJava(false);
-  // Thread pool workers should not be getting paused by user-code.
-  worker->thread_->SetCanBeSuspendedByUserCode(false);
+  // Mark thread pool workers as runtime-threads.
+  worker->thread_->SetIsRuntimeThread(true);
   // Do work until its time to shut down.
   worker->Run();
-  // Thread pool worker is finished. We want to allow suspension during shutdown.
-  worker->thread_->SetCanBeSuspendedByUserCode(true);
-  // Thread shuts down.
   runtime->DetachCurrentThread();
   return nullptr;
 }
@@ -129,7 +124,10 @@
   tasks_.clear();
 }
 
-ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
+ThreadPool::ThreadPool(const char* name,
+                       size_t num_threads,
+                       bool create_peers,
+                       size_t worker_stack_size)
   : name_(name),
     task_queue_lock_("task queue lock"),
     task_queue_condition_("task queue condition", task_queue_lock_),
@@ -139,28 +137,41 @@
     waiting_count_(0),
     start_time_(0),
     total_wait_time_(0),
-    // Add one since the caller of constructor waits on the barrier too.
-    creation_barier_(num_threads + 1),
+    creation_barier_(0),
     max_active_workers_(num_threads),
-    create_peers_(create_peers) {
+    create_peers_(create_peers),
+    worker_stack_size_(worker_stack_size) {
+  CreateThreads();
+}
+
+void ThreadPool::CreateThreads() {
+  CHECK(threads_.empty());
   Thread* self = Thread::Current();
-  while (GetThreadCount() < num_threads) {
-    const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
-                                                 GetThreadCount());
-    threads_.push_back(
-        new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
+  {
+    MutexLock mu(self, task_queue_lock_);
+    shutting_down_ = false;
+    // Each worker passes the barrier once it has started; waiters block until all have passed.
+    creation_barier_.Init(self, max_active_workers_);
+    while (GetThreadCount() < max_active_workers_) {
+      const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+                                                   GetThreadCount());
+      threads_.push_back(
+          new ThreadPoolWorker(this, worker_name, worker_stack_size_));
+    }
   }
-  // Wait for all of the threads to attach.
-  creation_barier_.Wait(self);
 }
 
-void ThreadPool::SetMaxActiveWorkers(size_t threads) {
-  MutexLock mu(Thread::Current(), task_queue_lock_);
-  CHECK_LE(threads, GetThreadCount());
-  max_active_workers_ = threads;
+void ThreadPool::WaitForWorkersToBeCreated() {
+  creation_barier_.Increment(Thread::Current(), 0);
 }
 
-ThreadPool::~ThreadPool() {
+const std::vector<ThreadPoolWorker*>& ThreadPool::GetWorkers() {
+  // Wait for all the workers to be created before returning them.
+  WaitForWorkersToBeCreated();
+  return threads_;
+}
+
+void ThreadPool::DeleteThreads() {
   {
     Thread* self = Thread::Current();
     MutexLock mu(self, task_queue_lock_);
@@ -170,10 +181,22 @@
     task_queue_condition_.Broadcast(self);
     completion_condition_.Broadcast(self);
   }
-  // Wait for the threads to finish.
+  // Wait for the threads to finish. We expect the user of the pool
+  // not to run multi-threaded calls to `CreateThreads` and `DeleteThreads`,
+  // so we don't guard the field here.
   STLDeleteElements(&threads_);
 }
 
+void ThreadPool::SetMaxActiveWorkers(size_t max_workers) {
+  MutexLock mu(Thread::Current(), task_queue_lock_);
+  CHECK_LE(max_workers, GetThreadCount());
+  max_active_workers_ = max_workers;
+}
+
+ThreadPool::~ThreadPool() {
+  DeleteThreads();
+}
+
 void ThreadPool::StartWorkers(Thread* self) {
   MutexLock mu(self, task_queue_lock_);
   started_ = true;
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 98a1193..0a2a50c 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_THREAD_POOL_H_
 
 #include <deque>
+#include <functional>
 #include <vector>
 
 #include "barrier.h"
@@ -48,6 +49,18 @@
   }
 };
 
+class FunctionTask : public SelfDeletingTask {
+ public:
+  explicit FunctionTask(std::function<void(Thread*)>&& func) : func_(std::move(func)) {}
+
+  void Run(Thread* self) override {
+    func_(self);
+  }
+
+ private:
+  std::function<void(Thread*)> func_;
+};
+
 class ThreadPoolWorker {
  public:
   static const size_t kDefaultStackSize = 1 * MB;
@@ -88,9 +101,7 @@
     return threads_.size();
   }
 
-  const std::vector<ThreadPoolWorker*>& GetWorkers() const {
-    return threads_;
-  }
+  const std::vector<ThreadPoolWorker*>& GetWorkers();
 
   // Broadcast to the workers and tell them to empty out the work queue.
   void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_);
@@ -110,9 +121,18 @@
   // If create_peers is true, all worker threads will have a Java peer object. Note that if the
   // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
   // will conservatively abort if create_peers and do_work are true.
-  ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
+  ThreadPool(const char* name,
+             size_t num_threads,
+             bool create_peers = false,
+             size_t worker_stack_size = ThreadPoolWorker::kDefaultStackSize);
   virtual ~ThreadPool();
 
+  // Create the threads of this pool.
+  void CreateThreads();
+
+  // Stops and deletes all threads in this pool.
+  void DeleteThreads();
+
   // Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
   // wait till all already running tasks are done.
   // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
@@ -132,6 +152,9 @@
   // Set the "nice" priorty for threads in the pool.
   void SetPthreadPriority(int priority);
 
+  // Wait for workers to be created.
+  void WaitForWorkersToBeCreated();
+
  protected:
   // get a task to run, blocks if there are no tasks left
   virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
@@ -158,7 +181,6 @@
   // How many worker threads are waiting on the condition.
   volatile size_t waiting_count_ GUARDED_BY(task_queue_lock_);
   std::deque<Task*> tasks_ GUARDED_BY(task_queue_lock_);
-  // TODO: make this immutable/const?
   std::vector<ThreadPoolWorker*> threads_;
   // Work balance detection.
   uint64_t start_time_ GUARDED_BY(task_queue_lock_);
@@ -166,6 +188,7 @@
   Barrier creation_barier_;
   size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
   const bool create_peers_;
+  const size_t worker_stack_size_;
 
  private:
   friend class ThreadPoolWorker;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index d784200..9e7c44a 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -29,7 +29,7 @@
  public:
   explicit CountTask(AtomicInteger* count) : count_(count), verbose_(false) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     if (verbose_) {
       LOG(INFO) << "Running: " << *self;
     }
@@ -39,7 +39,7 @@
     ++*count_;
   }
 
-  void Finalize() {
+  void Finalize() override {
     if (verbose_) {
       LOG(INFO) << "Finalizing: " << *Thread::Current();
     }
@@ -119,7 +119,7 @@
   // Drain the task list. Note: we have to restart here, as no tasks will be finished when
   // the pool is stopped.
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self, /* do_work */ true, false);
+  thread_pool.Wait(self, /* do_work= */ true, false);
 }
 
 class TreeTask : public Task {
@@ -129,7 +129,7 @@
         count_(count),
         depth_(depth) {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     if (depth_ > 1) {
       thread_pool_->AddTask(self, new TreeTask(thread_pool_, count_, depth_ - 1));
       thread_pool_->AddTask(self, new TreeTask(thread_pool_, count_, depth_ - 1));
@@ -138,7 +138,7 @@
     ++*count_;
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 
@@ -164,12 +164,12 @@
  public:
   PeerTask() {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     CHECK(self->GetPeer() != nullptr);
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 };
@@ -178,12 +178,12 @@
  public:
   NoPeerTask() {}
 
-  void Run(Thread* self) {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     CHECK(self->GetPeer() == nullptr);
   }
 
-  void Finalize() {
+  void Finalize() override {
     delete this;
   }
 };
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 8edfeec..e57a040 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_THREAD_STATE_H_
 #define ART_RUNTIME_THREAD_STATE_H_
 
-#include <ostream>
+#include <iosfwd>
 
 namespace art {
 
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 608f0ee..cdfe727 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -20,6 +20,7 @@
 #include "nativehelper/scoped_local_ref.h"
 #include "nativeloader/native_loader.h"
 
+#include "base/logging.h"
 #include "base/strlcpy.h"
 #include "jni/java_vm_ext.h"
 #include "runtime.h"
@@ -117,24 +118,29 @@
                                            : JavaVMExt::GetLibrarySearchPath(env, class_loader));
 
   bool needs_native_bridge = false;
-  std::string nativeloader_error_msg;
+  char* nativeloader_error_msg = nullptr;
   void* dlopen_handle = android::OpenNativeLibrary(env,
                                                    Runtime::Current()->GetTargetSdkVersion(),
                                                    name_.c_str(),
                                                    class_loader,
+                                                   nullptr,
                                                    library_path.get(),
                                                    &needs_native_bridge,
                                                    &nativeloader_error_msg);
   if (dlopen_handle == nullptr) {
     *error_msg = StringPrintf("Unable to dlopen %s: %s",
                               name_.c_str(),
-                              nativeloader_error_msg.c_str());
+                              nativeloader_error_msg);
+    android::NativeLoaderFreeErrorMessage(nativeloader_error_msg);
     *error = kLoadingError;
     return nullptr;
   }
   if (needs_native_bridge) {
     // TODO: Consider support?
-    android::CloseNativeLibrary(dlopen_handle, needs_native_bridge);
+    // The result of this call and error_msg are ignored because the most
+    // relevant error is that native bridge is unsupported.
+    android::CloseNativeLibrary(dlopen_handle, needs_native_bridge, &nativeloader_error_msg);
+    android::NativeLoaderFreeErrorMessage(nativeloader_error_msg);
     *error_msg = StringPrintf("Native-bridge agents unsupported: %s", name_.c_str());
     *error = kLoadingError;
     return nullptr;
@@ -174,7 +180,7 @@
   }
 }
 
-Agent::Agent(Agent&& other)
+Agent::Agent(Agent&& other) noexcept
     : dlopen_handle_(nullptr),
       onload_(nullptr),
       onattach_(nullptr),
@@ -182,7 +188,7 @@
   *this = std::move(other);
 }
 
-Agent& Agent::operator=(Agent&& other) {
+Agent& Agent::operator=(Agent&& other) noexcept {
   if (this != &other) {
     if (dlopen_handle_ != nullptr) {
       Unload();
diff --git a/runtime/ti/agent.h b/runtime/ti/agent.h
index 24a6f1c..598c8ff 100644
--- a/runtime/ti/agent.h
+++ b/runtime/ti/agent.h
@@ -22,7 +22,8 @@
 
 #include <memory>
 
-#include "base/logging.h"
+#include <android-base/logging.h>
+#include <android-base/macros.h>
 
 namespace art {
 namespace ti {
@@ -105,8 +106,8 @@
   // TODO We need to acquire some locks probably.
   void Unload();
 
-  Agent(Agent&& other);
-  Agent& operator=(Agent&& other);
+  Agent(Agent&& other) noexcept;
+  Agent& operator=(Agent&& other) noexcept;
 
   ~Agent();
 
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 949fabe..074e846 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -38,6 +38,8 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "instrumentation.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
@@ -58,32 +60,6 @@
 static constexpr uint8_t kOpNewThread = 2U;
 static constexpr uint8_t kOpTraceSummary = 3U;
 
-class BuildStackTraceVisitor : public StackVisitor {
- public:
-  explicit BuildStackTraceVisitor(Thread* thread)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        method_trace_(Trace::AllocStackTrace()) {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    // Ignore runtime frames (in particular callee save).
-    if (!m->IsRuntimeMethod()) {
-      method_trace_->push_back(m);
-    }
-    return true;
-  }
-
-  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
-  std::vector<ArtMethod*>* GetStackTrace() const {
-    return method_trace_;
-  }
-
- private:
-  std::vector<ArtMethod*>* const method_trace_;
-
-  DISALLOW_COPY_AND_ASSIGN(BuildStackTraceVisitor);
-};
-
 static const char     kTraceTokenChar             = '*';
 static const uint16_t kTraceHeaderLength          = 32;
 static const uint32_t kTraceMagicValue            = 0x574f4c53;
@@ -228,9 +204,19 @@
 }
 
 static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
-  BuildStackTraceVisitor build_trace_visitor(thread);
-  build_trace_visitor.WalkStack();
-  std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
+  std::vector<ArtMethod*>* const stack_trace = Trace::AllocStackTrace();
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        // Ignore runtime frames (in particular callee save).
+        if (!m->IsRuntimeMethod()) {
+          stack_trace->push_back(m);
+        }
+        return true;
+      },
+      thread,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
   Trace* the_trace = reinterpret_cast<Trace*>(arg);
   the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
 }
@@ -406,6 +392,15 @@
   // Enable count of allocs if specified in the flags.
   bool enable_stats = false;
 
+  if (runtime->GetJit() != nullptr) {
+    // TODO b/110263880 It would be better if we didn't need to do this.
+    // Since we need to hold the method entrypoint across a suspend to ensure instrumentation
+    // hooks are called correctly we have to disable jit-gc to ensure that the entrypoint doesn't
+    // go away. Furthermore we need to leave this off permanently since one could get the same
+    // effect by causing this to be toggled on and off.
+    runtime->GetJit()->GetCodeCache()->SetGarbageCollectCode(false);
+  }
+
   // Create Trace object.
   {
     // Required since EnableMethodTracing calls ConfigureStubs which visits class linker classes.
@@ -435,7 +430,7 @@
         // want to use the trampolines anyway since it is faster. It makes the story with disabling
         // jit-gc more complex though.
         runtime->GetInstrumentation()->EnableMethodTracing(
-            kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable());
+            kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable());
       }
     }
   }
@@ -533,106 +528,6 @@
   }
 }
 
-void Trace::Pause() {
-  bool stop_alloc_counting = false;
-  Runtime* runtime = Runtime::Current();
-  Trace* the_trace = nullptr;
-
-  Thread* const self = Thread::Current();
-  pthread_t sampling_pthread = 0U;
-  {
-    MutexLock mu(self, *Locks::trace_lock_);
-    if (the_trace_ == nullptr) {
-      LOG(ERROR) << "Trace pause requested, but no trace currently running";
-      return;
-    } else {
-      the_trace = the_trace_;
-      sampling_pthread = sampling_pthread_;
-    }
-  }
-
-  if (sampling_pthread != 0U) {
-    {
-      MutexLock mu(self, *Locks::trace_lock_);
-      the_trace_ = nullptr;
-    }
-    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
-    sampling_pthread_ = 0U;
-    {
-      MutexLock mu(self, *Locks::trace_lock_);
-      the_trace_ = the_trace;
-    }
-  }
-
-  if (the_trace != nullptr) {
-    gc::ScopedGCCriticalSection gcs(self,
-                                    gc::kGcCauseInstrumentation,
-                                    gc::kCollectorTypeInstrumentation);
-    ScopedSuspendAll ssa(__FUNCTION__);
-    stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
-
-    if (the_trace->trace_mode_ == TraceMode::kSampling) {
-      MutexLock mu(self, *Locks::thread_list_lock_);
-      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
-    } else {
-      runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
-      runtime->GetInstrumentation()->RemoveListener(
-          the_trace,
-          instrumentation::Instrumentation::kMethodEntered |
-          instrumentation::Instrumentation::kMethodExited |
-          instrumentation::Instrumentation::kMethodUnwind);
-    }
-  }
-
-  if (stop_alloc_counting) {
-    // Can be racy since SetStatsEnabled is not guarded by any locks.
-    Runtime::Current()->SetStatsEnabled(false);
-  }
-}
-
-void Trace::Resume() {
-  Thread* self = Thread::Current();
-  Trace* the_trace;
-  {
-    MutexLock mu(self, *Locks::trace_lock_);
-    if (the_trace_ == nullptr) {
-      LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request";
-      return;
-    }
-    the_trace = the_trace_;
-  }
-
-  Runtime* runtime = Runtime::Current();
-
-  // Enable count of allocs if specified in the flags.
-  bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
-
-  {
-    gc::ScopedGCCriticalSection gcs(self,
-                                    gc::kGcCauseInstrumentation,
-                                    gc::kCollectorTypeInstrumentation);
-    ScopedSuspendAll ssa(__FUNCTION__);
-
-    // Reenable.
-    if (the_trace->trace_mode_ == TraceMode::kSampling) {
-      CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
-          reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
-    } else {
-      runtime->GetInstrumentation()->AddListener(the_trace,
-                                                 instrumentation::Instrumentation::kMethodEntered |
-                                                 instrumentation::Instrumentation::kMethodExited |
-                                                 instrumentation::Instrumentation::kMethodUnwind);
-      // TODO: In full-PIC mode, we don't need to fully deopt.
-      runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
-    }
-  }
-
-  // Can't call this when holding the mutator lock.
-  if (enable_stats) {
-    runtime->SetStatsEnabled(true);
-  }
-}
-
 TracingMode Trace::GetMethodTracingMode() {
   MutexLock mu(Thread::Current(), *Locks::trace_lock_);
   if (the_trace_ == nullptr) {
@@ -888,15 +783,6 @@
   LOG(ERROR) << "Unexpected branch event in tracing" << ArtMethod::PrettyMethod(method);
 }
 
-void Trace::InvokeVirtualOrInterface(Thread*,
-                                     Handle<mirror::Object>,
-                                     ArtMethod* method,
-                                     uint32_t dex_pc,
-                                     ArtMethod*) {
-  LOG(ERROR) << "Unexpected invoke event in tracing" << ArtMethod::PrettyMethod(method)
-             << " " << dex_pc;
-}
-
 void Trace::WatchedFramePop(Thread* self ATTRIBUTE_UNUSED,
                             const ShadowFrame& frame ATTRIBUTE_UNUSED) {
   LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
@@ -1124,7 +1010,7 @@
 
 void Trace::DumpThreadList(std::ostream& os) {
   Thread* self = Thread::Current();
-  for (auto it : exited_threads_) {
+  for (const auto& it : exited_threads_) {
     os << it.first << "\t" << it.second << "\n";
   }
   Locks::thread_list_lock_->AssertNotHeld(self);
diff --git a/runtime/trace.h b/runtime/trace.h
index 5d96493..567f6ed 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -27,11 +27,12 @@
 #include <vector>
 
 #include "base/atomic.h"
-#include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/os.h"
 #include "base/safe_map.h"
 #include "instrumentation.h"
+#include "runtime_globals.h"
 
 namespace unix_file {
 class FdFile;
@@ -42,6 +43,7 @@
 class ArtField;
 class ArtMethod;
 class DexFile;
+class LOCKABLE Mutex;
 class ShadowFrame;
 class Thread;
 
@@ -154,9 +156,6 @@
       REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
                !Locks::trace_lock_);
 
-  static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
-  static void Resume() REQUIRES(!Locks::trace_lock_);
-
   // Stop tracing. This will finish the trace and write it to file/send it via DDMS.
   static void Stop()
       REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
@@ -173,63 +172,57 @@
   uint32_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // InstrumentationListener implementation.
   void MethodEntered(Thread* thread,
                      Handle<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodExited(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodUnwind(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
-  void InvokeVirtualOrInterface(Thread* thread,
-                                Handle<mirror::Object> this_object,
-                                ArtMethod* caller,
-                                uint32_t dex_pc,
-                                ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
@@ -264,20 +257,20 @@
       // how to annotate this.
       NO_THREAD_SAFETY_ANALYSIS;
   void FinishTracing()
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
 
   void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                            instrumentation::Instrumentation::InstrumentationEvent event,
                            uint32_t thread_clock_diff, uint32_t wall_clock_diff)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // Methods to output traced methods and threads.
   void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
-      REQUIRES(!*unique_methods_lock_);
+      REQUIRES(!unique_methods_lock_);
   void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
   void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
 
   // Methods to register seen entitites in streaming mode. The methods return true if the entity
@@ -295,15 +288,15 @@
   void FlushBuf()
       REQUIRES(streaming_lock_);
 
-  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
+  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!unique_methods_lock_);
   uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
-      REQUIRES(!*unique_methods_lock_);
-  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
-  std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
+      REQUIRES(!unique_methods_lock_);
+  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!unique_methods_lock_);
+  std::string GetMethodLine(ArtMethod* method) REQUIRES(!unique_methods_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index c9766bc..62482fd 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc_root-inl.h"
@@ -320,7 +321,7 @@
 
 void Transaction::VisitObjectLogs(RootVisitor* visitor) {
   // List of moving roots.
-  typedef std::pair<mirror::Object*, mirror::Object*> ObjectPair;
+  using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
   std::list<ObjectPair> moving_roots;
 
   // Visit roots.
@@ -348,7 +349,7 @@
 
 void Transaction::VisitArrayLogs(RootVisitor* visitor) {
   // List of moving roots.
-  typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
+  using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
   std::list<ArrayPair> moving_roots;
 
   for (auto& it : array_logs_) {
@@ -530,7 +531,7 @@
       break;
     default:
       LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
-      break;
+      UNREACHABLE();
   }
 }
 
@@ -557,7 +558,7 @@
           break;
         default:
           LOG(FATAL) << "Unknown interned string kind";
-          break;
+          UNREACHABLE();
       }
       break;
     }
@@ -571,13 +572,13 @@
           break;
         default:
           LOG(FATAL) << "Unknown interned string kind";
-          break;
+          UNREACHABLE();
       }
       break;
     }
     default:
       LOG(FATAL) << "Unknown interned string op";
-      break;
+      UNREACHABLE();
   }
 }
 
@@ -668,9 +669,10 @@
       break;
     case Primitive::kPrimNot:
       LOG(FATAL) << "ObjectArray should be treated as Object";
-      break;
+      UNREACHABLE();
     default:
       LOG(FATAL) << "Unsupported type " << array_type;
+      UNREACHABLE();
   }
 }
 
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 370a619..69ded3d 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -21,7 +21,8 @@
 #include "class_linker-inl.h"
 #include "common_runtime_test.h"
 #include "dex/dex_file.h"
-#include "mirror/array-inl.h"
+#include "mirror/array-alloc-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -488,7 +489,7 @@
 
   // Go search the dex file to find the string id of our string.
   static const char* kResolvedString = "ResolvedString";
-  const DexFile::StringId* string_id = dex_file->FindStringId(kResolvedString);
+  const dex::StringId* string_id = dex_file->FindStringId(kResolvedString);
   ASSERT_TRUE(string_id != nullptr);
   dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
   ASSERT_TRUE(string_idx.IsValid());
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index c0ea6be..3512efe 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -22,10 +22,10 @@
 #include <android-base/logging.h>
 
 #include "base/bit_utils.h"
-#include "base/globals.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
 #include "mirror/dex_cache.h"
+#include "runtime_globals.h"
 
 namespace art {
 
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 452cd8e..72c42b9 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -31,7 +31,6 @@
 #include "dex/class_accessor-inl.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
-#include "dex/hidden_api_access_flags.h"
 #include "dex_to_dex_decompiler.h"
 #include "quicken_info.h"
 
@@ -150,11 +149,11 @@
       (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
       unquicken ? MAP_PRIVATE : MAP_SHARED,
       file_fd,
-      /* start */ 0u,
+      /* start= */ 0u,
       low_4gb,
       vdex_filename.c_str(),
       mmap_reuse,
-      /* reservation */ nullptr,
+      /* reservation= */ nullptr,
       error_msg);
   if (!mmap.IsValid()) {
     *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
@@ -173,7 +172,7 @@
       return nullptr;
     }
     vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
-                    /* decompile_return_instruction */ false);
+                    /* decompile_return_instruction= */ false);
     // Update the quickening info size to pretend there isn't any.
     size_t offset = vdex->GetDexSectionHeaderOffset();
     reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
@@ -213,13 +212,13 @@
     std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection(
         dex_file_start,
         size,
-        /*data_base*/ nullptr,
-        /*data_size*/ 0u,
+        /*data_base=*/ nullptr,
+        /*data_size=*/ 0u,
         location,
         GetLocationChecksum(i),
-        nullptr /*oat_dex_file*/,
-        false /*verify*/,
-        false /*verify_checksum*/,
+        /*oat_dex_file=*/ nullptr,
+        /*verify=*/ false,
+        /*verify_checksum=*/ false,
         error_msg));
     if (dex == nullptr) {
       return false;
@@ -282,12 +281,12 @@
     return;
   }
   // Make sure to not unquicken the same code item multiple times.
-  std::unordered_set<const DexFile::CodeItem*> unquickened_code_item;
+  std::unordered_set<const dex::CodeItem*> unquickened_code_item;
   CompactOffsetTable::Accessor accessor(GetQuickenInfoOffsetTable(source_dex_begin,
                                                                   quickening_info));
   for (ClassAccessor class_accessor : target_dex_file.GetClasses()) {
     for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
-      const DexFile::CodeItem* code_item = method.GetCodeItem();
+      const dex::CodeItem* code_item = method.GetCodeItem();
       if (code_item != nullptr && unquickened_code_item.emplace(code_item).second) {
         const uint32_t offset = accessor.GetOffset(method.GetIndex());
         // Offset being 0 means not quickened.
diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc
index ced6e28..9d92b42 100644
--- a/runtime/vdex_file_test.cc
+++ b/runtime/vdex_file_test.cc
@@ -34,14 +34,14 @@
   std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(),
                                                   0,
                                                   tmp.GetFilename(),
-                                                  /*writable*/false,
-                                                  /*low_4gb*/false,
-                                                  /*quicken*/false,
+                                                  /*writable=*/false,
+                                                  /*low_4gb=*/false,
+                                                  /*unquicken=*/false,
                                                   &error_msg);
   EXPECT_TRUE(vdex == nullptr);
 
   vdex = VdexFile::Open(
-      tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, /*quicken*/ false, &error_msg);
+      tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false, /*unquicken=*/ false, &error_msg);
   EXPECT_TRUE(vdex == nullptr);
 }
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5fce892..91eba21 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -28,6 +28,7 @@
 #include "base/indenter.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/mutex-inl.h"
+#include "base/sdk_version.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
@@ -157,7 +158,7 @@
   bool early_failure = false;
   std::string failure_message;
   const DexFile& dex_file = klass->GetDexFile();
-  const DexFile::ClassDef* class_def = klass->GetClassDef();
+  const dex::ClassDef* class_def = klass->GetClassDef();
   ObjPtr<mirror::Class> super = klass->GetSuperClass();
   std::string temp;
   if (super == nullptr && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
@@ -209,7 +210,7 @@
                                         const DexFile* dex_file,
                                         Handle<mirror::DexCache> dex_cache,
                                         Handle<mirror::ClassLoader> class_loader,
-                                        const DexFile::ClassDef& class_def,
+                                        const dex::ClassDef& class_def,
                                         CompilerCallbacks* callbacks,
                                         bool allow_soft_failures,
                                         HardFailLogMode log_level,
@@ -242,7 +243,7 @@
     *previous_idx = method_idx;
     const InvokeType type = method.GetInvokeType(class_def.access_flags_);
     ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-        method_idx, dex_cache, class_loader, /* referrer */ nullptr, type);
+        method_idx, dex_cache, class_loader, /* referrer= */ nullptr, type);
     if (resolved_method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
@@ -263,7 +264,7 @@
                                                       callbacks,
                                                       allow_soft_failures,
                                                       log_level,
-                                                      /*need_precise_constants*/ false,
+                                                      /*need_precise_constants=*/ false,
                                                       api_level,
                                                       &hard_failure_msg);
     if (result.kind == FailureKind::kHardFailure) {
@@ -318,8 +319,8 @@
                                                          const DexFile* dex_file,
                                                          Handle<mirror::DexCache> dex_cache,
                                                          Handle<mirror::ClassLoader> class_loader,
-                                                         const DexFile::ClassDef& class_def,
-                                                         const DexFile::CodeItem* code_item,
+                                                         const dex::ClassDef& class_def,
+                                                         const dex::CodeItem* code_item,
                                                          ArtMethod* method,
                                                          uint32_t method_access_flags,
                                                          CompilerCallbacks* callbacks,
@@ -340,11 +341,11 @@
                           method_idx,
                           method,
                           method_access_flags,
-                          true /* can_load_classes */,
+                          /* can_load_classes= */ true,
                           allow_soft_failures,
                           need_precise_constants,
-                          false /* verify to dump */,
-                          true /* allow_thread_suspension */,
+                          /* verify to dump */ false,
+                          /* allow_thread_suspension= */ true,
                           api_level);
   if (verifier.Verify()) {
     // Verification completed, however failures may be pending that didn't cause the verification
@@ -461,8 +462,8 @@
                                                     const DexFile* dex_file,
                                                     Handle<mirror::DexCache> dex_cache,
                                                     Handle<mirror::ClassLoader> class_loader,
-                                                    const DexFile::ClassDef& class_def,
-                                                    const DexFile::CodeItem* code_item,
+                                                    const dex::ClassDef& class_def,
+                                                    const dex::CodeItem* code_item,
                                                     ArtMethod* method,
                                                     uint32_t method_access_flags,
                                                     uint32_t api_level) {
@@ -475,11 +476,11 @@
                                                 dex_method_idx,
                                                 method,
                                                 method_access_flags,
-                                                true /* can_load_classes */,
-                                                true /* allow_soft_failures */,
-                                                true /* need_precise_constants */,
-                                                true /* verify_to_dump */,
-                                                true /* allow_thread_suspension */,
+                                                /* can_load_classes= */ true,
+                                                /* allow_soft_failures= */ true,
+                                                /* need_precise_constants= */ true,
+                                                /* verify_to_dump= */ true,
+                                                /* allow_thread_suspension= */ true,
                                                 api_level);
   verifier->Verify();
   verifier->DumpFailures(vios->Stream());
@@ -499,8 +500,8 @@
                                const DexFile* dex_file,
                                Handle<mirror::DexCache> dex_cache,
                                Handle<mirror::ClassLoader> class_loader,
-                               const DexFile::ClassDef& class_def,
-                               const DexFile::CodeItem* code_item,
+                               const dex::ClassDef& class_def,
+                               const dex::CodeItem* code_item,
                                uint32_t dex_method_idx,
                                ArtMethod* method,
                                uint32_t method_access_flags,
@@ -570,11 +571,11 @@
                           m->GetDexMethodIndex(),
                           m,
                           m->GetAccessFlags(),
-                          false /* can_load_classes */,
-                          true  /* allow_soft_failures */,
-                          false /* need_precise_constants */,
-                          false /* verify_to_dump */,
-                          false /* allow_thread_suspension */,
+                          /* can_load_classes= */ false,
+                          /* allow_soft_failures= */ true,
+                          /* need_precise_constants= */ false,
+                          /* verify_to_dump= */ false,
+                          /* allow_thread_suspension= */ false,
                           api_level);
   verifier.interesting_dex_pc_ = dex_pc;
   verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
@@ -601,7 +602,7 @@
 bool MethodVerifier::Verify() {
   // Some older code doesn't correctly mark constructors as such. Test for this case by looking at
   // the name.
-  const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+  const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
   const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
   bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
   bool static_constructor_by_name = strcmp("<clinit>", method_name) == 0;
@@ -916,7 +917,7 @@
     return true;
   }
   const uint32_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
-  for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+  for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
     const uint32_t start = try_item.start_addr_;
     const uint32_t end = start + try_item.insn_count_;
     if ((start >= end) || (start >= insns_size) || (end > insns_size)) {
@@ -1636,7 +1637,7 @@
     cur_arg++;
   }
 
-  const DexFile::ProtoId& proto_id =
+  const dex::ProtoId& proto_id =
       dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
   DexFileParameterIterator iterator(*dex_file_, proto_id);
 
@@ -1875,7 +1876,7 @@
 // Returns the index of the first final instance field of the given class, or kDexNoIndex if there
 // is no such field.
 static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, dex::TypeIndex type_idx) {
-  const DexFile::ClassDef* class_def = dex_file.FindClassDef(type_idx);
+  const dex::ClassDef* class_def = dex_file.FindClassDef(type_idx);
   DCHECK(class_def != nullptr);
   ClassAccessor accessor(dex_file, *class_def);
   for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
@@ -2653,6 +2654,7 @@
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
             // of the move-object.
+            // Note: this is only valid if the move source was not clobbered.
             uint32_t move_idx = instance_of_idx - 1;
             while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
               move_idx--;
@@ -2662,28 +2664,25 @@
                             work_insn_idx_)) {
               break;
             }
+            auto maybe_update_fn = [&instance_of_inst, update_line, this, &cast_type](
+                uint16_t move_src,
+                uint16_t move_trg)
+                REQUIRES_SHARED(Locks::mutator_lock_) {
+              if (move_trg == instance_of_inst.VRegB_22c() &&
+                  move_src != instance_of_inst.VRegA_22c()) {
+                update_line->SetRegisterType<LockOp::kKeep>(this, move_src, cast_type);
+              }
+            };
             const Instruction& move_inst = code_item_accessor_.InstructionAt(move_idx);
             switch (move_inst.Opcode()) {
               case Instruction::MOVE_OBJECT:
-                if (move_inst.VRegA_12x() == instance_of_inst.VRegB_22c()) {
-                  update_line->SetRegisterType<LockOp::kKeep>(this,
-                                                              move_inst.VRegB_12x(),
-                                                              cast_type);
-                }
+                maybe_update_fn(move_inst.VRegB_12x(), move_inst.VRegA_12x());
                 break;
               case Instruction::MOVE_OBJECT_FROM16:
-                if (move_inst.VRegA_22x() == instance_of_inst.VRegB_22c()) {
-                  update_line->SetRegisterType<LockOp::kKeep>(this,
-                                                              move_inst.VRegB_22x(),
-                                                              cast_type);
-                }
+                maybe_update_fn(move_inst.VRegB_22x(), move_inst.VRegA_22x());
                 break;
               case Instruction::MOVE_OBJECT_16:
-                if (move_inst.VRegA_32x() == instance_of_inst.VRegB_22c()) {
-                  update_line->SetRegisterType<LockOp::kKeep>(this,
-                                                              move_inst.VRegB_32x(),
-                                                              cast_type);
-                }
+                maybe_update_fn(move_inst.VRegB_32x(), move_inst.VRegA_32x());
                 break;
               default:
                 break;
@@ -2884,7 +2883,7 @@
       }
       if (return_type == nullptr) {
         uint32_t method_idx = GetMethodIdxOfInvoke(inst);
-        const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+        const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         dex::TypeIndex return_type_idx =
             dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
         const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -2907,7 +2906,7 @@
       const RegType* return_type = nullptr;
       if (called_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-        const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+        const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         is_constructor = strcmp("<init>", dex_file_->StringDataByIdx(method_id.name_idx_)) == 0;
         dex::TypeIndex return_type_idx =
             dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2985,7 +2984,7 @@
         const char* descriptor;
         if (called_method == nullptr) {
           uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-          const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+          const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
           dex::TypeIndex return_type_idx =
               dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
           descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -3040,7 +3039,7 @@
       const char* descriptor;
       if (abs_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-        const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+        const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         dex::TypeIndex return_type_idx =
             dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
         descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -3105,7 +3104,7 @@
       // method handle produced by step 1. The dex file verifier has checked ranges for
       // the first three arguments and CheckCallSite has checked the method handle type.
       const dex::ProtoIndex proto_idx = dex_file_->GetProtoIndexForCallSite(call_site_idx);
-      const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
+      const dex::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
       DexFileParameterIterator param_it(*dex_file_, proto_id);
       // Treat method as static as it has yet to be determined.
       VerifyInvocationArgsFromIterator(&param_it, inst, METHOD_STATIC, is_range, nullptr);
@@ -3496,7 +3495,7 @@
    */
   if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
     bool has_catch_all_handler = false;
-    const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(work_insn_idx_);
+    const dex::TryItem* try_item = code_item_accessor_.FindTryItem(work_insn_idx_);
     CHECK(try_item != nullptr);
     CatchHandlerIterator iterator(code_item_accessor_, *try_item);
 
@@ -3677,9 +3676,10 @@
   // Note: we do this for unresolved classes to trigger re-verification at runtime.
   if (C == CheckAccess::kYes &&
       result->IsNonZeroReferenceTypes() &&
-      (api_level_ >= 28u || !result->IsUnresolvedTypes())) {
+      (IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP) || !result->IsUnresolvedTypes())) {
     const RegType& referrer = GetDeclaringClass();
-    if ((api_level_ >= 28u || !referrer.IsUnresolvedTypes()) && !referrer.CanAccess(*result)) {
+    if ((IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP) || !referrer.IsUnresolvedTypes()) &&
+        !referrer.CanAccess(*result)) {
       Fail(VERIFY_ERROR_ACCESS_CLASS) << "(possibly) illegal class access: '"
                                       << referrer << "' -> '" << *result << "'";
     }
@@ -3747,7 +3747,7 @@
 
 ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
     uint32_t dex_method_idx, MethodType method_type) {
-  const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
+  const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
   const RegType& klass_type = ResolveClass<CheckAccess::kYes>(method_id.class_idx_);
   if (klass_type.IsConflict()) {
     std::string append(" in attempt to access method ");
@@ -4091,7 +4091,7 @@
   }
 
   // Check method handle kind is valid.
-  const DexFile::MethodHandleItem& mh = dex_file_->GetMethodHandle(index[0]);
+  const dex::MethodHandleItem& mh = dex_file_->GetMethodHandle(index[0]);
   if (mh.method_handle_type_ != static_cast<uint16_t>(DexFile::MethodHandleType::kInvokeStatic)) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
                                       << " argument 0 method handle type is not InvokeStatic: "
@@ -4123,7 +4123,7 @@
  private:
   ArtMethod* res_method_;
   size_t pos_;
-  const DexFile::TypeList* params_;
+  const dex::TypeList* params_;
   const size_t params_size_;
 };
 
@@ -4229,7 +4229,7 @@
     return false;
   }
 
-  const DexFile::TypeList* types = method->GetParameterTypeList();
+  const dex::TypeList* types = method->GetParameterTypeList();
   if (types->Size() != 1) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "Signature polymorphic method has too many arguments " << types->Size() << " != 1";
@@ -4551,7 +4551,7 @@
 }
 
 ArtField* MethodVerifier::GetStaticField(int field_idx) {
-  const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class
   const RegType& klass_type = ResolveClass<CheckAccess::kYes>(field_id.class_idx_);
   if (klass_type.IsConflict()) {  // bad class
@@ -4562,7 +4562,9 @@
   }
   if (klass_type.IsUnresolvedTypes()) {
     // Accessibility checks depend on resolved fields.
-    DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty() || api_level_ < 28u);
+    DCHECK(klass_type.Equals(GetDeclaringClass()) ||
+           !failures_.empty() ||
+           IsSdkVersionSetAndLessThan(api_level_, SdkVersion::kP));
 
     return nullptr;  // Can't resolve Class so no more to do here, will do checking at runtime.
   }
@@ -4592,7 +4594,7 @@
 }
 
 ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
-  const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+  const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class.
   const RegType& klass_type = ResolveClass<CheckAccess::kYes>(field_id.class_idx_);
   if (klass_type.IsConflict()) {
@@ -4603,7 +4605,9 @@
   }
   if (klass_type.IsUnresolvedTypes()) {
     // Accessibility checks depend on resolved fields.
-    DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty() || api_level_ < 28u);
+    DCHECK(klass_type.Equals(GetDeclaringClass()) ||
+           !failures_.empty() ||
+           IsSdkVersionSetAndLessThan(api_level_, SdkVersion::kP));
 
     return nullptr;  // Can't resolve Class so no more to do here
   }
@@ -4739,7 +4743,7 @@
       DCHECK(!can_load_classes_ || self_->IsExceptionPending());
       self_->ClearException();
     }
-  } else if (api_level_ >= 28u) {
+  } else if (IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP)) {
     // If we don't have the field (it seems we failed resolution) and this is a PUT, we need to
     // redo verification at runtime as the field may be final, unless the field id shows it's in
     // the same class.
@@ -4750,7 +4754,7 @@
     //
     // Note: see b/34966607. This and above may be changed in the future.
     if (kAccType == FieldAccessType::kAccPut) {
-      const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+      const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
       const char* field_class_descriptor = dex_file_->GetFieldDeclaringClassDescriptor(field_id);
       const RegType* field_class_type = &reg_types_.FromDescriptor(GetClassLoader(),
                                                                    field_class_descriptor,
@@ -4766,7 +4770,7 @@
     }
   }
   if (field_type == nullptr) {
-    const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+    const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
     const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
     field_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
@@ -4929,8 +4933,8 @@
       }
     }
     if (return_type_ == nullptr) {
-      const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
-      const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
+      const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+      const dex::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
       dex::TypeIndex return_type_idx = proto_id.return_type_idx_;
       const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
       return_type_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
@@ -4941,7 +4945,7 @@
 
 const RegType& MethodVerifier::GetDeclaringClass() {
   if (declaring_class_ == nullptr) {
-    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
     if (method_being_verified_ != nullptr) {
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index eef2280..c178df0 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -26,7 +26,6 @@
 #include "base/scoped_arena_containers.h"
 #include "base/value_object.h"
 #include "dex/code_item_accessors.h"
-#include "dex/dex_file.h"
 #include "dex/dex_file_types.h"
 #include "dex/method_reference.h"
 #include "handle.h"
@@ -39,11 +38,17 @@
 
 class ClassLinker;
 class CompilerCallbacks;
+class DexFile;
 class Instruction;
 struct ReferenceMap2Visitor;
 class Thread;
 class VariableIndentationOutputStream;
 
+namespace dex {
+struct ClassDef;
+struct CodeItem;
+}  // namespace dex
+
 namespace mirror {
 class DexCache;
 }  // namespace mirror
@@ -107,7 +112,7 @@
                                  const DexFile* dex_file,
                                  Handle<mirror::DexCache> dex_cache,
                                  Handle<mirror::ClassLoader> class_loader,
-                                 const DexFile::ClassDef& class_def,
+                                 const dex::ClassDef& class_def,
                                  CompilerCallbacks* callbacks,
                                  bool allow_soft_failures,
                                  HardFailLogMode log_level,
@@ -121,8 +126,8 @@
                                              const DexFile* dex_file,
                                              Handle<mirror::DexCache> dex_cache,
                                              Handle<mirror::ClassLoader> class_loader,
-                                             const DexFile::ClassDef& class_def,
-                                             const DexFile::CodeItem* code_item, ArtMethod* method,
+                                             const dex::ClassDef& class_def,
+                                             const dex::CodeItem* code_item, ArtMethod* method,
                                              uint32_t method_access_flags,
                                              uint32_t api_level)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -238,8 +243,8 @@
                  const DexFile* dex_file,
                  Handle<mirror::DexCache> dex_cache,
                  Handle<mirror::ClassLoader> class_loader,
-                 const DexFile::ClassDef& class_def,
-                 const DexFile::CodeItem* code_item,
+                 const dex::ClassDef& class_def,
+                 const dex::CodeItem* code_item,
                  uint32_t method_idx,
                  ArtMethod* method,
                  uint32_t access_flags,
@@ -297,8 +302,8 @@
                                   const DexFile* dex_file,
                                   Handle<mirror::DexCache> dex_cache,
                                   Handle<mirror::ClassLoader> class_loader,
-                                  const DexFile::ClassDef& class_def_idx,
-                                  const DexFile::CodeItem* code_item,
+                                  const dex::ClassDef& class_def_idx,
+                                  const dex::CodeItem* code_item,
                                   ArtMethod* method,
                                   uint32_t method_access_flags,
                                   CompilerCallbacks* callbacks,
@@ -716,7 +721,7 @@
   Handle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
   // The class loader for the declaring class of the method.
   Handle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
-  const DexFile::ClassDef& class_def_;  // The class def of the declaring class of the method.
+  const dex::ClassDef& class_def_;  // The class def of the declaring class of the method.
   const CodeItemDataAccessor code_item_accessor_;
   const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index cedc583..36890a6 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -42,7 +42,7 @@
     // Verify the class
     std::string error_msg;
     FailureKind failure = MethodVerifier::VerifyClass(
-        self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg);
+        self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level= */ 0u, &error_msg);
 
     if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
       ASSERT_TRUE(failure == FailureKind::kSoftFailure ||
@@ -57,7 +57,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Verify all the classes defined in this file
     for (size_t i = 0; i < dex.NumClassDefs(); i++) {
-      const DexFile::ClassDef& class_def = dex.GetClassDef(i);
+      const dex::ClassDef& class_def = dex.GetClassDef(i);
       const char* descriptor = dex.GetClassDescriptor(class_def);
       VerifyClass(descriptor);
     }
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 4a3f9e6..150d35c 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -55,18 +55,22 @@
 const NullType* NullType::instance_ = nullptr;
 
 PrimitiveType::PrimitiveType(ObjPtr<mirror::Class> klass,
-                             const StringPiece& descriptor,
+                             const std::string_view& descriptor,
                              uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
-Cat1Type::Cat1Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id)
+Cat1Type::Cat1Type(ObjPtr<mirror::Class> klass,
+                   const std::string_view& descriptor,
+                   uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-Cat2Type::Cat2Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id)
+Cat2Type::Cat2Type(ObjPtr<mirror::Class> klass,
+                   const std::string_view& descriptor,
+                   uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
@@ -132,7 +136,7 @@
 }
 
 const DoubleHiType* DoubleHiType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                                 const StringPiece& descriptor,
+                                                 const std::string_view& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -147,7 +151,7 @@
 }
 
 const DoubleLoType* DoubleLoType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                                 const StringPiece& descriptor,
+                                                 const std::string_view& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -162,7 +166,7 @@
 }
 
 const LongLoType* LongLoType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                             const StringPiece& descriptor,
+                                             const std::string_view& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongLoType(klass, descriptor, cache_id);
@@ -170,7 +174,7 @@
 }
 
 const LongHiType* LongHiType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                             const StringPiece& descriptor,
+                                             const std::string_view& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -192,7 +196,7 @@
 }
 
 const FloatType* FloatType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                           const StringPiece& descriptor,
+                                           const std::string_view& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new FloatType(klass, descriptor, cache_id);
@@ -207,7 +211,7 @@
 }
 
 const CharType* CharType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                         const StringPiece& descriptor,
+                                         const std::string_view& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new CharType(klass, descriptor, cache_id);
@@ -222,7 +226,7 @@
 }
 
 const ShortType* ShortType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                           const StringPiece& descriptor,
+                                           const std::string_view& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ShortType(klass, descriptor, cache_id);
@@ -237,7 +241,7 @@
 }
 
 const ByteType* ByteType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                         const StringPiece& descriptor,
+                                         const std::string_view& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ByteType(klass, descriptor, cache_id);
@@ -252,7 +256,7 @@
 }
 
 const IntegerType* IntegerType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                               const StringPiece& descriptor,
+                                               const std::string_view& descriptor,
                                                uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -267,7 +271,7 @@
 }
 
 const ConflictType* ConflictType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                                 const StringPiece& descriptor,
+                                                 const std::string_view& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -282,7 +286,7 @@
 }
 
 const BooleanType* BooleanType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                               const StringPiece& descriptor,
+                                               const std::string_view& descriptor,
                                                uint16_t cache_id) {
   CHECK(BooleanType::instance_ == nullptr);
   instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -301,7 +305,7 @@
 }
 
 const UndefinedType* UndefinedType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                                   const StringPiece& descriptor,
+                                                   const std::string_view& descriptor,
                                                    uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -316,7 +320,7 @@
 }
 
 PreciseReferenceType::PreciseReferenceType(ObjPtr<mirror::Class> klass,
-                                           const StringPiece& descriptor,
+                                           const std::string_view& descriptor,
                                            uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -352,47 +356,47 @@
 
 std::string UnresolvedReferenceType::Dump() const {
   std::stringstream result;
-  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
+  result << "Unresolved Reference: " << PrettyDescriptor(std::string(GetDescriptor()).c_str());
   return result.str();
 }
 
 std::string UnresolvedUninitializedRefType::Dump() const {
   std::stringstream result;
-  result << "Unresolved And Uninitialized Reference" << ": "
-      << PrettyDescriptor(GetDescriptor().as_string().c_str())
+  result << "Unresolved And Uninitialized Reference: "
+      << PrettyDescriptor(std::string(GetDescriptor()).c_str())
       << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
 std::string UnresolvedUninitializedThisRefType::Dump() const {
   std::stringstream result;
-  result << "Unresolved And Uninitialized This Reference"
-      << PrettyDescriptor(GetDescriptor().as_string().c_str());
+  result << "Unresolved And Uninitialized This Reference: "
+      << PrettyDescriptor(std::string(GetDescriptor()).c_str());
   return result.str();
 }
 
 std::string ReferenceType::Dump() const {
   std::stringstream result;
-  result << "Reference" << ": " << mirror::Class::PrettyDescriptor(GetClass());
+  result << "Reference: " << mirror::Class::PrettyDescriptor(GetClass());
   return result.str();
 }
 
 std::string PreciseReferenceType::Dump() const {
   std::stringstream result;
-  result << "Precise Reference" << ": "<< mirror::Class::PrettyDescriptor(GetClass());
+  result << "Precise Reference: " << mirror::Class::PrettyDescriptor(GetClass());
   return result.str();
 }
 
 std::string UninitializedReferenceType::Dump() const {
   std::stringstream result;
-  result << "Uninitialized Reference" << ": " << mirror::Class::PrettyDescriptor(GetClass());
+  result << "Uninitialized Reference: " << mirror::Class::PrettyDescriptor(GetClass());
   result << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
 std::string UninitializedThisReferenceType::Dump() const {
   std::stringstream result;
-  result << "Uninitialized This Reference" << ": " << mirror::Class::PrettyDescriptor(GetClass());
+  result << "Uninitialized This Reference: " << mirror::Class::PrettyDescriptor(GetClass());
   result << "Allocation PC: " << GetAllocationPc();
   return result.str();
 }
@@ -756,13 +760,13 @@
         VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
                                                join_class,
                                                GetClass(),
-                                               /* strict */ true,
-                                               /* is_assignable */ true);
+                                               /* is_strict= */ true,
+                                               /* is_assignable= */ true);
         VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
                                                join_class,
                                                incoming_type.GetClass(),
-                                               /* strict */ true,
-                                               /* is_assignable */ true);
+                                               /* is_strict= */ true,
+                                               /* is_assignable= */ true);
       }
       if (GetClass() == join_class && !IsPreciseReference()) {
         return *this;
@@ -771,7 +775,7 @@
       } else {
         std::string temp;
         const char* descriptor = join_class->GetDescriptor(&temp);
-        return reg_types->FromClass(descriptor, join_class, /* precise */ false);
+        return reg_types->FromClass(descriptor, join_class, /* precise= */ false);
       }
     }
   } else {
@@ -990,7 +994,7 @@
 }
 
 const NullType* NullType::CreateInstance(ObjPtr<mirror::Class> klass,
-                                         const StringPiece& descriptor,
+                                         const std::string_view& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new NullType(klass, descriptor, cache_id);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 3099b23..56073db 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -21,12 +21,12 @@
 #include <limits>
 #include <set>
 #include <string>
+#include <string_view>
 
 #include "base/arena_object.h"
 #include "base/bit_vector.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
-#include "base/stringpiece.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
 #include "handle_scope.h"
@@ -185,7 +185,7 @@
   bool IsJavaLangObjectArray() const
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsInstantiableTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
-  const StringPiece& GetDescriptor() const {
+  const std::string_view& GetDescriptor() const {
     DCHECK(HasClass() ||
            (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
             !IsUnresolvedSuperClass()));
@@ -319,7 +319,7 @@
 
  protected:
   RegType(ObjPtr<mirror::Class> klass,
-          const StringPiece& descriptor,
+          const std::string_view& descriptor,
           uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : descriptor_(descriptor),
         klass_(klass),
@@ -336,7 +336,7 @@
 
   virtual AssignmentType GetAssignmentTypeImpl() const = 0;
 
-  const StringPiece descriptor_;
+  const std::string_view descriptor_;
   mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
@@ -389,7 +389,7 @@
 
   // Create the singleton instance.
   static const ConflictType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                            const StringPiece& descriptor,
+                                            const std::string_view& descriptor,
                                             uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -402,7 +402,7 @@
 
  private:
   ConflictType(ObjPtr<mirror::Class> klass,
-               const StringPiece& descriptor,
+               const std::string_view& descriptor,
                uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -425,7 +425,7 @@
 
   // Create the singleton instance.
   static const UndefinedType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                             const StringPiece& descriptor,
+                                             const std::string_view& descriptor,
                                              uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -438,7 +438,7 @@
 
  private:
   UndefinedType(ObjPtr<mirror::Class> klass,
-                const StringPiece& descriptor,
+                const std::string_view& descriptor,
                 uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -450,7 +450,7 @@
 class PrimitiveType : public RegType {
  public:
   PrimitiveType(ObjPtr<mirror::Class> klass,
-                const StringPiece& descriptor,
+                const std::string_view& descriptor,
                 uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool HasClassVirtual() const override { return true; }
@@ -458,7 +458,8 @@
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor,
+  Cat1Type(ObjPtr<mirror::Class> klass,
+           const std::string_view& descriptor,
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
@@ -467,7 +468,7 @@
   bool IsInteger() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                           const StringPiece& descriptor,
+                                           const std::string_view& descriptor,
                                            uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const IntegerType* GetInstance() PURE;
@@ -479,7 +480,7 @@
 
  private:
   IntegerType(ObjPtr<mirror::Class> klass,
-              const StringPiece& descriptor,
+              const std::string_view& descriptor,
               uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -492,7 +493,7 @@
   bool IsBoolean() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                           const StringPiece& descriptor,
+                                           const std::string_view& descriptor,
                                            uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const BooleanType* GetInstance() PURE;
@@ -504,7 +505,7 @@
 
  private:
   BooleanType(ObjPtr<mirror::Class> klass,
-              const StringPiece& descriptor,
+              const std::string_view& descriptor,
               uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -518,7 +519,7 @@
   bool IsByte() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                        const StringPiece& descriptor,
+                                        const std::string_view& descriptor,
                                         uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const ByteType* GetInstance() PURE;
@@ -530,7 +531,7 @@
 
  private:
   ByteType(ObjPtr<mirror::Class> klass,
-           const StringPiece& descriptor,
+           const std::string_view& descriptor,
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -543,7 +544,7 @@
   bool IsShort() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                         const StringPiece& descriptor,
+                                         const std::string_view& descriptor,
                                          uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const ShortType* GetInstance() PURE;
@@ -554,7 +555,7 @@
   }
 
  private:
-  ShortType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor,
+  ShortType(ObjPtr<mirror::Class> klass, const std::string_view& descriptor,
             uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -567,7 +568,7 @@
   bool IsChar() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                        const StringPiece& descriptor,
+                                        const std::string_view& descriptor,
                                         uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const CharType* GetInstance() PURE;
@@ -579,7 +580,7 @@
 
  private:
   CharType(ObjPtr<mirror::Class> klass,
-           const StringPiece& descriptor,
+           const std::string_view& descriptor,
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -592,7 +593,7 @@
   bool IsFloat() const override { return true; }
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                         const StringPiece& descriptor,
+                                         const std::string_view& descriptor,
                                          uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const FloatType* GetInstance() PURE;
@@ -604,7 +605,7 @@
 
  private:
   FloatType(ObjPtr<mirror::Class> klass,
-            const StringPiece& descriptor,
+            const std::string_view& descriptor,
             uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -615,7 +616,7 @@
 class Cat2Type : public PrimitiveType {
  public:
   Cat2Type(ObjPtr<mirror::Class> klass,
-           const StringPiece& descriptor,
+           const std::string_view& descriptor,
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
@@ -625,7 +626,7 @@
   bool IsLongLo() const override { return true; }
   bool IsLong() const override { return true; }
   static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                          const StringPiece& descriptor,
+                                          const std::string_view& descriptor,
                                           uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const LongLoType* GetInstance() PURE;
@@ -637,7 +638,7 @@
 
  private:
   LongLoType(ObjPtr<mirror::Class> klass,
-             const StringPiece& descriptor,
+             const std::string_view& descriptor,
              uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -650,7 +651,7 @@
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsLongHi() const override { return true; }
   static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                          const StringPiece& descriptor,
+                                          const std::string_view& descriptor,
                                           uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const LongHiType* GetInstance() PURE;
@@ -662,7 +663,7 @@
 
  private:
   LongHiType(ObjPtr<mirror::Class> klass,
-             const StringPiece& descriptor,
+             const std::string_view& descriptor,
              uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -676,7 +677,7 @@
   bool IsDoubleLo() const override { return true; }
   bool IsDouble() const override { return true; }
   static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                            const StringPiece& descriptor,
+                                            const std::string_view& descriptor,
                                             uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const DoubleLoType* GetInstance() PURE;
@@ -688,7 +689,7 @@
 
  private:
   DoubleLoType(ObjPtr<mirror::Class> klass,
-               const StringPiece& descriptor,
+               const std::string_view& descriptor,
                uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -701,7 +702,7 @@
   std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsDoubleHi() const override { return true; }
   static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                            const StringPiece& descriptor,
+                                            const std::string_view& descriptor,
                                             uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
   static const DoubleHiType* GetInstance() PURE;
@@ -713,7 +714,7 @@
 
  private:
   DoubleHiType(ObjPtr<mirror::Class> klass,
-               const StringPiece& descriptor,
+               const std::string_view& descriptor,
                uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -886,7 +887,7 @@
 
   // Create the singleton instance.
   static const NullType* CreateInstance(ObjPtr<mirror::Class> klass,
-                                        const StringPiece& descriptor,
+                                        const std::string_view& descriptor,
                                         uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -905,7 +906,7 @@
   }
 
  private:
-  NullType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id)
+  NullType(ObjPtr<mirror::Class> klass, const std::string_view& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -920,7 +921,7 @@
 class UninitializedType : public RegType {
  public:
   UninitializedType(ObjPtr<mirror::Class> klass,
-                    const StringPiece& descriptor,
+                    const std::string_view& descriptor,
                     uint32_t allocation_pc,
                     uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
@@ -945,7 +946,7 @@
 class UninitializedReferenceType final : public UninitializedType {
  public:
   UninitializedReferenceType(ObjPtr<mirror::Class> klass,
-                             const StringPiece& descriptor,
+                             const std::string_view& descriptor,
                              uint32_t allocation_pc,
                              uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -964,8 +965,9 @@
 // constructor.
 class UnresolvedUninitializedRefType final : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const StringPiece& descriptor,
-                                 uint32_t allocation_pc, uint16_t cache_id)
+  UnresolvedUninitializedRefType(const std::string_view& descriptor,
+                                 uint32_t allocation_pc,
+                                 uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
     CheckConstructorInvariants(this);
@@ -986,7 +988,7 @@
 class UninitializedThisReferenceType final : public UninitializedType {
  public:
   UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
-                                 const StringPiece& descriptor,
+                                 const std::string_view& descriptor,
                                  uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -1005,8 +1007,7 @@
 
 class UnresolvedUninitializedThisRefType final : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
-                                     uint16_t cache_id)
+  UnresolvedUninitializedThisRefType(const std::string_view& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, 0, cache_id) {
     CheckConstructorInvariants(this);
@@ -1027,7 +1028,7 @@
 class ReferenceType final : public RegType {
  public:
   ReferenceType(ObjPtr<mirror::Class> klass,
-                const StringPiece& descriptor,
+                const std::string_view& descriptor,
                 uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {
     CheckConstructorInvariants(this);
@@ -1052,7 +1053,7 @@
 class PreciseReferenceType final : public RegType {
  public:
   PreciseReferenceType(ObjPtr<mirror::Class> klass,
-                       const StringPiece& descriptor,
+                       const std::string_view& descriptor,
                        uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1072,7 +1073,7 @@
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
-  UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
+  UnresolvedType(const std::string_view& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
@@ -1088,7 +1089,7 @@
 // of this type must be conservative.
 class UnresolvedReferenceType final : public UnresolvedType {
  public:
-  UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
+  UnresolvedReferenceType(const std::string_view& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : UnresolvedType(descriptor, cache_id) {
     CheckConstructorInvariants(this);
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9f87adf..f62e8b6 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -126,7 +126,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
   const RegType* result = &FromClass("Ljava/lang/Class;",
                                      GetClassRoot<mirror::Class>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -135,7 +135,7 @@
   // String is final and therefore always precise.
   const RegType* result = &FromClass("Ljava/lang/String;",
                                      GetClassRoot<mirror::String>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -143,7 +143,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
   const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;",
                                      GetClassRoot<mirror::MethodHandle>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -151,7 +151,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() {
   const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;",
                                      GetClassRoot<mirror::MethodType>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index f1f3488..7bff255 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -135,7 +135,7 @@
   }
 }
 
-bool RegTypeCache::MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) {
+bool RegTypeCache::MatchDescriptor(size_t idx, const std::string_view& descriptor, bool precise) {
   const RegType* entry = entries_[idx];
   if (descriptor != entry->descriptor_) {
     return false;
@@ -170,20 +170,20 @@
   return klass;
 }
 
-StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
-  char* ptr = allocator_.AllocArray<char>(string_piece.length());
-  memcpy(ptr, string_piece.data(), string_piece.length());
-  return StringPiece(ptr, string_piece.length());
+std::string_view RegTypeCache::AddString(const std::string_view& str) {
+  char* ptr = allocator_.AllocArray<char>(str.length());
+  memcpy(ptr, str.data(), str.length());
+  return std::string_view(ptr, str.length());
 }
 
 const RegType& RegTypeCache::From(ObjPtr<mirror::ClassLoader> loader,
                                   const char* descriptor,
                                   bool precise) {
-  StringPiece sp_descriptor(descriptor);
-  // Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
-  // operations on the descriptor.
+  std::string_view sv_descriptor(descriptor);
+  // Try looking up the class in the cache first. We use a std::string_view to avoid
+  // repeated strlen operations on the descriptor.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    if (MatchDescriptor(i, sp_descriptor, precise)) {
+    if (MatchDescriptor(i, sv_descriptor, precise)) {
       return *(entries_[i]);
     }
   }
@@ -205,9 +205,9 @@
       DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
       DCHECK(!klass->IsInterface());
       entry =
-          new (&allocator_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
+          new (&allocator_) PreciseReferenceType(klass, AddString(sv_descriptor), entries_.size());
     } else {
-      entry = new (&allocator_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
+      entry = new (&allocator_) ReferenceType(klass, AddString(sv_descriptor), entries_.size());
     }
     return AddEntry(entry);
   } else {  // Class not resolved.
@@ -221,7 +221,7 @@
     }
     if (IsValidDescriptor(descriptor)) {
       return AddEntry(
-          new (&allocator_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
+          new (&allocator_) UnresolvedReferenceType(AddString(sv_descriptor), entries_.size()));
     } else {
       // The descriptor is broken return the unknown type as there's nothing sensible that
       // could be done at runtime
@@ -254,7 +254,7 @@
   return nullptr;
 }
 
-const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+const RegType* RegTypeCache::InsertClass(const std::string_view& descriptor,
                                          ObjPtr<mirror::Class> klass,
                                          bool precise) {
   // No reference to the class was found, create new reference.
@@ -272,7 +272,7 @@
   DCHECK(klass != nullptr);
   const RegType* reg_type = FindClass(klass, precise);
   if (reg_type == nullptr) {
-    reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+    reg_type = InsertClass(AddString(std::string_view(descriptor)), klass, precise);
   }
   return *reg_type;
 }
@@ -438,14 +438,14 @@
 
     // Is the resolved part a primitive array?
     if (resolved_merged_is_array && !resolved_parts_merged.IsObjectArrayTypes()) {
-      return JavaLangObject(false /* precise */);
+      return JavaLangObject(/* precise= */ false);
     }
 
     // Is any part not an array (but exists)?
     if ((!left_unresolved_is_array && left_resolved != &left) ||
         (!right_unresolved_is_array && right_resolved != &right) ||
         !resolved_merged_is_array) {
-      return JavaLangObject(false /* precise */);
+      return JavaLangObject(/* precise= */ false);
     }
   }
 
@@ -488,7 +488,7 @@
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = nullptr;
-  const StringPiece& descriptor(type.GetDescriptor());
+  const std::string_view& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -525,7 +525,7 @@
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
-    const StringPiece& descriptor(uninit_type.GetDescriptor());
+    const std::string_view& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
@@ -575,7 +575,7 @@
 
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
-  const StringPiece& descriptor(type.GetDescriptor());
+  const std::string_view& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -656,7 +656,7 @@
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
     DCHECK(!array.IsUnresolvedMergedReference());  // Caller must make sure not to ask for this.
-    const std::string descriptor(array.GetDescriptor().as_string());
+    const std::string descriptor(array.GetDescriptor());
     return FromDescriptor(loader, descriptor.c_str() + 1, false);
   } else {
     ObjPtr<mirror::Class> klass = array.GetClass()->GetComponentType();
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index d668222..a9a8116 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_H_
 
 #include <stdint.h>
+#include <string_view>
 #include <vector>
 
 #include "base/casts.h"
@@ -32,7 +33,6 @@
 class ClassLoader;
 }  // namespace mirror
 class ScopedArenaAllocator;
-class StringPiece;
 
 namespace verifier {
 
@@ -80,7 +80,7 @@
   const RegType* FindClass(ObjPtr<mirror::Class> klass, bool precise) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   // Insert a new class with a specified descriptor, must not already be in the cache.
-  const RegType* InsertClass(const StringPiece& descriptor,
+  const RegType* InsertClass(const std::string_view& descriptor,
                              ObjPtr<mirror::Class> klass,
                              bool precise)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -164,7 +164,7 @@
   void FillPrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
   ObjPtr<mirror::Class> ResolveClass(const char* descriptor, ObjPtr<mirror::ClassLoader> loader)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
+  bool MatchDescriptor(size_t idx, const std::string_view& descriptor, bool precise)
       REQUIRES_SHARED(Locks::mutator_lock_);
   const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -173,9 +173,9 @@
   template <class RegTypeType>
   RegTypeType& AddEntry(RegTypeType* new_entry) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Add a string piece to the arena allocator so that it stays live for the lifetime of the
-  // verifier.
-  StringPiece AddString(const StringPiece& string_piece);
+  // Add a string to the arena allocator so that it stays live for the lifetime of the
+  // verifier and return a string view.
+  std::string_view AddString(const std::string_view& str);
 
   static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 0430d20..3224385 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -80,8 +80,8 @@
   EXPECT_FALSE(precise_lo.CheckWidePair(precise_const));
   EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi));
   // Test Merging.
-  EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes());
-  EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes());
+  EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier= */ nullptr)).IsLongTypes());
+  EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier= */ nullptr)).IsLongHighTypes());
 }
 
 TEST_F(RegTypeTest, Primitives) {
@@ -429,7 +429,7 @@
   const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
   const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
   const RegType& unresolved_merged = cache.FromUnresolvedMerge(
-      unresolved_ref, unresolved_ref_another, /* verifier */ nullptr);
+      unresolved_ref, unresolved_ref_another, /* verifier= */ nullptr);
 
   std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
   EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -490,14 +490,14 @@
   RegTypeCache cache_new(true, allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
-  EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject());
+  EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject());
   // Merge two unresolved types.
   const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr);
+  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier= */ nullptr);
   EXPECT_TRUE(merged.IsUnresolvedMergedReference());
   RegType& merged_nonconst = const_cast<RegType&>(merged);
 
@@ -520,22 +520,22 @@
   const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
   {
     // float MERGE precise cst => float.
-    const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // precise cst MERGE float => float.
-    const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // float MERGE imprecise cst => float.
-    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // imprecise cst MERGE float => float.
-    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
 }
@@ -556,46 +556,46 @@
   const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
     const RegType& merged = long_lo_type.Merge(
-        imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
     const RegType& merged = imprecise_cst_lo.Merge(
-        long_lo_type, &cache_new, /* verifier */ nullptr);
+        long_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
     const RegType& merged = long_hi_type.Merge(
-        imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
     const RegType& merged = imprecise_cst_hi.Merge(
-        long_hi_type, &cache_new, /* verifier */ nullptr);
+        long_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
 }
@@ -617,49 +617,49 @@
   {
     // lo MERGE precise cst lo => lo.
     const RegType& merged = double_lo_type.Merge(
-        precise_cst_lo, &cache_new, /* verifier */ nullptr);
+        precise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // precise cst lo MERGE lo => lo.
     const RegType& merged = precise_cst_lo.Merge(
-        double_lo_type, &cache_new, /* verifier */ nullptr);
+        double_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
     const RegType& merged = double_lo_type.Merge(
-        imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
     const RegType& merged = imprecise_cst_lo.Merge(
-        double_lo_type, &cache_new, /* verifier */ nullptr);
+        double_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // hi MERGE precise cst hi => hi.
     const RegType& merged = double_hi_type.Merge(
-        precise_cst_hi, &cache_new, /* verifier */ nullptr);
+        precise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // precise cst hi MERGE hi => hi.
     const RegType& merged = precise_cst_hi.Merge(
-        double_hi_type, &cache_new, /* verifier */ nullptr);
+        double_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
     const RegType& merged = double_hi_type.Merge(
-        imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
     const RegType& merged = imprecise_cst_hi.Merge(
-        double_hi_type, &cache_new, /* verifier */ nullptr);
+        double_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
 }
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 9bb60bb..de66bf5 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -23,7 +23,7 @@
 
 #include <android-base/logging.h>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
 #include "base/scoped_arena_containers.h"
 
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index fb91976..bdcadd9 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -22,6 +22,7 @@
 #include "art_method-inl.h"
 #include "base/indenter.h"
 #include "base/leb128.h"
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "compiler_callbacks.h"
 #include "dex/dex_file-inl.h"
@@ -43,7 +44,7 @@
 }
 
 VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files)
-    : VerifierDeps(dex_files, /*output_only*/ true) {}
+    : VerifierDeps(dex_files, /*output_only=*/ true) {}
 
 void VerifierDeps::MergeWith(const VerifierDeps& other,
                              const std::vector<const DexFile*>& dex_files) {
@@ -98,9 +99,9 @@
     DCHECK(dex_cache != nullptr) << klass->PrettyClass();
     if (dex_cache->GetDexFile() == &dex_file) {
       // FindStringId is slow, try to go through the class def if we have one.
-      const DexFile::ClassDef* class_def = klass->GetClassDef();
+      const dex::ClassDef* class_def = klass->GetClassDef();
       DCHECK(class_def != nullptr) << klass->PrettyClass();
-      const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def->class_idx_);
+      const dex::TypeId& type_id = dex_file.GetTypeId(class_def->class_idx_);
       if (kIsDebugBuild) {
         std::string temp;
         CHECK_EQ(GetIdFromString(dex_file, klass->GetDescriptor(&temp)), type_id.descriptor_idx_);
@@ -118,9 +119,9 @@
                                                       ObjPtr<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!klass->IsArrayClass()) {
-    const DexFile::TypeId& type_id = dex_file.GetTypeId(type_idx);
+    const dex::TypeId& type_id = dex_file.GetTypeId(type_idx);
     const DexFile& klass_dex = klass->GetDexFile();
-    const DexFile::TypeId& klass_type_id = klass_dex.GetTypeId(klass->GetClassDef()->class_idx_);
+    const dex::TypeId& klass_type_id = klass_dex.GetTypeId(klass->GetClassDef()->class_idx_);
     if (strcmp(dex_file.GetTypeDescriptor(type_id),
                klass_dex.GetTypeDescriptor(klass_type_id)) == 0) {
       return type_id.descriptor_idx_;
@@ -200,7 +201,7 @@
 }
 
 dex::StringIndex VerifierDeps::GetIdFromString(const DexFile& dex_file, const std::string& str) {
-  const DexFile::StringId* string_id = dex_file.FindStringId(str.c_str());
+  const dex::StringId* string_id = dex_file.FindStringId(str.c_str());
   if (string_id != nullptr) {
     // String is in the DEX file. Return its ID.
     return dex_file.GetIndexForStringId(*string_id);
@@ -439,7 +440,7 @@
       AddAssignability(dex_file,
                        destination_component,
                        source_component,
-                       /* is_strict */ true,
+                       /* is_strict= */ true,
                        is_assignable);
       return;
     }
@@ -707,7 +708,7 @@
 
 VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files,
                            ArrayRef<const uint8_t> data)
-    : VerifierDeps(dex_files, /*output_only*/ false) {
+    : VerifierDeps(dex_files, /*output_only=*/ false) {
   if (data.empty()) {
     // Return eagerly, as the first thing we expect from VerifierDeps data is
     // the number of created strings, even if there is no dependency.
@@ -804,7 +805,7 @@
     }
 
     for (const FieldResolution& entry : dep.second->fields_) {
-      const DexFile::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
+      const dex::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
       vios->Stream()
           << dex_file.GetFieldDeclaringClassDescriptor(field_id) << "->"
           << dex_file.GetFieldName(field_id) << ":"
@@ -822,7 +823,7 @@
     }
 
     for (const MethodResolution& method : dep.second->methods_) {
-      const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+      const dex::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
       vios->Stream()
           << dex_file.GetMethodDeclaringClassDescriptor(method_id) << "->"
           << dex_file.GetMethodName(method_id)
@@ -948,7 +949,7 @@
 }
 
 static std::string GetFieldDescription(const DexFile& dex_file, uint32_t index) {
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(index);
+  const dex::FieldId& field_id = dex_file.GetFieldId(index);
   return std::string(dex_file.GetFieldDeclaringClassDescriptor(field_id))
       + "->"
       + dex_file.GetFieldName(field_id)
@@ -964,7 +965,7 @@
   // and have the same recorded flags.
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   for (const auto& entry : fields) {
-    const DexFile::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
+    const dex::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
     StringPiece name(dex_file.StringDataByIdx(field_id.name_idx_));
     StringPiece type(dex_file.StringDataByIdx(dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
     // Only use field_id.class_idx_ when the entry is unresolved, which is rare.
@@ -1010,7 +1011,7 @@
 }
 
 static std::string GetMethodDescription(const DexFile& dex_file, uint32_t index) {
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(index);
+  const dex::MethodId& method_id = dex_file.GetMethodId(index);
   return std::string(dex_file.GetMethodDeclaringClassDescriptor(method_id))
       + "->"
       + dex_file.GetMethodName(method_id)
@@ -1025,7 +1026,7 @@
   PointerSize pointer_size = class_linker->GetImagePointerSize();
 
   for (const auto& entry : methods) {
-    const DexFile::MethodId& method_id = dex_file.GetMethodId(entry.GetDexMethodIndex());
+    const dex::MethodId& method_id = dex_file.GetMethodId(entry.GetDexMethodIndex());
 
     const char* name = dex_file.GetMethodName(method_id);
     const Signature signature = dex_file.GetMethodSignature(method_id);
@@ -1089,9 +1090,9 @@
                                  const DexFileDeps& deps,
                                  Thread* self) const {
   bool result = VerifyAssignability(
-      class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self);
+      class_loader, dex_file, deps.assignable_types_, /* expected_assignability= */ true, self);
   result = result && VerifyAssignability(
-      class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self);
+      class_loader, dex_file, deps.unassignable_types_, /* expected_assignability= */ false, self);
 
   result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
   result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 0146b17..dfd4a5c 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -22,7 +22,7 @@
 #include <vector>
 
 #include "base/array_ref.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
 #include "handle.h"
 #include "obj_ptr.h"
diff --git a/runtime/verify_object.cc b/runtime/verify_object.cc
index 70ca13f..2b8c7da 100644
--- a/runtime/verify_object.cc
+++ b/runtime/verify_object.cc
@@ -17,11 +17,11 @@
 #include "verify_object-inl.h"
 
 #include "base/bit_utils.h"
-#include "base/globals.h"
 #include "gc/heap.h"
 #include "mirror/object-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
+#include "runtime_globals.h"
 
 namespace art {
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 206418f..955a455 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -48,6 +48,7 @@
 jclass WellKnownClasses::dalvik_system_DexPathList;
 jclass WellKnownClasses::dalvik_system_DexPathList__Element;
 jclass WellKnownClasses::dalvik_system_EmulatedStackFrame;
+jclass WellKnownClasses::dalvik_system_InMemoryDexClassLoader;
 jclass WellKnownClasses::dalvik_system_PathClassLoader;
 jclass WellKnownClasses::dalvik_system_VMRuntime;
 jclass WellKnownClasses::java_lang_annotation_Annotation__array;
@@ -83,6 +84,7 @@
 
 jmethodID WellKnownClasses::dalvik_system_BaseDexClassLoader_getLdLibraryPath;
 jmethodID WellKnownClasses::dalvik_system_VMRuntime_runFinalization;
+jmethodID WellKnownClasses::dalvik_system_VMRuntime_hiddenApiUsed;
 jmethodID WellKnownClasses::java_lang_Boolean_valueOf;
 jmethodID WellKnownClasses::java_lang_Byte_valueOf;
 jmethodID WellKnownClasses::java_lang_Character_valueOf;
@@ -119,15 +121,18 @@
 jfieldID WellKnownClasses::dalvik_system_DexFile_cookie;
 jfieldID WellKnownClasses::dalvik_system_DexFile_fileName;
 jfieldID WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList;
+jfieldID WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders;
 jfieldID WellKnownClasses::dalvik_system_DexPathList_dexElements;
 jfieldID WellKnownClasses::dalvik_system_DexPathList__Element_dexFile;
 jfieldID WellKnownClasses::dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+jfieldID WellKnownClasses::java_lang_Thread_parkBlocker;
 jfieldID WellKnownClasses::java_lang_Thread_daemon;
 jfieldID WellKnownClasses::java_lang_Thread_group;
 jfieldID WellKnownClasses::java_lang_Thread_lock;
 jfieldID WellKnownClasses::java_lang_Thread_name;
 jfieldID WellKnownClasses::java_lang_Thread_priority;
 jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
@@ -273,7 +278,7 @@
       STRING_INIT_LIST(TO_STRING_FACTORY)
   #undef TO_STRING_FACTORY
   LOG(FATAL) << "Could not find StringFactory method for String.<init>";
-  return nullptr;
+  UNREACHABLE();
 }
 
 uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
@@ -285,13 +290,13 @@
       STRING_INIT_LIST(TO_ENTRY_POINT)
   #undef TO_ENTRY_POINT
   LOG(FATAL) << "Could not find StringFactory method for String.<init>";
-  return 0;
+  UNREACHABLE();
 }
 #undef STRING_INIT_LIST
 
 void WellKnownClasses::Init(JNIEnv* env) {
   hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
-      hiddenapi::EnforcementPolicy::kNoChecks);
+      hiddenapi::EnforcementPolicy::kDisabled);
 
   dalvik_annotation_optimization_CriticalNative =
       CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
@@ -303,6 +308,7 @@
   dalvik_system_DexPathList = CacheClass(env, "dalvik/system/DexPathList");
   dalvik_system_DexPathList__Element = CacheClass(env, "dalvik/system/DexPathList$Element");
   dalvik_system_EmulatedStackFrame = CacheClass(env, "dalvik/system/EmulatedStackFrame");
+  dalvik_system_InMemoryDexClassLoader = CacheClass(env, "dalvik/system/InMemoryDexClassLoader");
   dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader");
   dalvik_system_VMRuntime = CacheClass(env, "dalvik/system/VMRuntime");
 
@@ -339,6 +345,7 @@
 
   dalvik_system_BaseDexClassLoader_getLdLibraryPath = CacheMethod(env, dalvik_system_BaseDexClassLoader, false, "getLdLibraryPath", "()Ljava/lang/String;");
   dalvik_system_VMRuntime_runFinalization = CacheMethod(env, dalvik_system_VMRuntime, true, "runFinalization", "(J)V");
+  dalvik_system_VMRuntime_hiddenApiUsed = CacheMethod(env, dalvik_system_VMRuntime, true, "hiddenApiUsed", "(Ljava/lang/String;Ljava/lang/String;IZ)V");
   java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V");
   java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
 
@@ -365,17 +372,20 @@
   org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
 
   dalvik_system_BaseDexClassLoader_pathList = CacheField(env, dalvik_system_BaseDexClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
+  dalvik_system_BaseDexClassLoader_sharedLibraryLoaders = CacheField(env, dalvik_system_BaseDexClassLoader, false, "sharedLibraryLoaders", "[Ljava/lang/ClassLoader;");
   dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "Ljava/lang/Object;");
   dalvik_system_DexFile_fileName = CacheField(env, dalvik_system_DexFile, false, "mFileName", "Ljava/lang/String;");
   dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
   dalvik_system_DexPathList__Element_dexFile = CacheField(env, dalvik_system_DexPathList__Element, false, "dexFile", "Ldalvik/system/DexFile;");
   dalvik_system_VMRuntime_nonSdkApiUsageConsumer = CacheField(env, dalvik_system_VMRuntime, true, "nonSdkApiUsageConsumer", "Ljava/util/function/Consumer;");
+  java_lang_Thread_parkBlocker = CacheField(env, java_lang_Thread, false, "parkBlocker", "Ljava/lang/Object;");
   java_lang_Thread_daemon = CacheField(env, java_lang_Thread, false, "daemon", "Z");
   java_lang_Thread_group = CacheField(env, java_lang_Thread, false, "group", "Ljava/lang/ThreadGroup;");
   java_lang_Thread_lock = CacheField(env, java_lang_Thread, false, "lock", "Ljava/lang/Object;");
   java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
   java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
   java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+  java_lang_Thread_unparkedBeforeStart = CacheField(env, java_lang_Thread, false, "unparkedBeforeStart", "Z");
   java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
   java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
   java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
@@ -418,7 +428,7 @@
   // to make sure these JNI methods are available.
   java_lang_Runtime_nativeLoad =
       CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad",
-                  "(Ljava/lang/String;Ljava/lang/ClassLoader;)"
+                  "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Class;)"
                       "Ljava/lang/String;");
   java_lang_reflect_Proxy_init =
     CacheMethod(env, java_lang_reflect_Proxy, false, "<init>",
@@ -478,6 +488,7 @@
 
   dalvik_system_BaseDexClassLoader_getLdLibraryPath = nullptr;
   dalvik_system_VMRuntime_runFinalization = nullptr;
+  dalvik_system_VMRuntime_hiddenApiUsed = nullptr;
   java_lang_Boolean_valueOf = nullptr;
   java_lang_Byte_valueOf = nullptr;
   java_lang_Character_valueOf = nullptr;
@@ -516,6 +527,7 @@
   dalvik_system_DexPathList_dexElements = nullptr;
   dalvik_system_DexPathList__Element_dexFile = nullptr;
   dalvik_system_VMRuntime_nonSdkApiUsageConsumer = nullptr;
+  java_lang_Thread_parkBlocker = nullptr;
   java_lang_Thread_daemon = nullptr;
   java_lang_Thread_group = nullptr;
   java_lang_Thread_lock = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index ce5ab1d..872b562 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 #define ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "obj_ptr.h"
 
@@ -57,6 +57,7 @@
   static jclass dalvik_system_DexPathList;
   static jclass dalvik_system_DexPathList__Element;
   static jclass dalvik_system_EmulatedStackFrame;
+  static jclass dalvik_system_InMemoryDexClassLoader;
   static jclass dalvik_system_PathClassLoader;
   static jclass dalvik_system_VMRuntime;
   static jclass java_lang_annotation_Annotation__array;
@@ -92,6 +93,7 @@
 
   static jmethodID dalvik_system_BaseDexClassLoader_getLdLibraryPath;
   static jmethodID dalvik_system_VMRuntime_runFinalization;
+  static jmethodID dalvik_system_VMRuntime_hiddenApiUsed;
   static jmethodID java_lang_Boolean_valueOf;
   static jmethodID java_lang_Byte_valueOf;
   static jmethodID java_lang_Character_valueOf;
@@ -126,17 +128,20 @@
   static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_dispatch;
 
   static jfieldID dalvik_system_BaseDexClassLoader_pathList;
+  static jfieldID dalvik_system_BaseDexClassLoader_sharedLibraryLoaders;
   static jfieldID dalvik_system_DexFile_cookie;
   static jfieldID dalvik_system_DexFile_fileName;
   static jfieldID dalvik_system_DexPathList_dexElements;
   static jfieldID dalvik_system_DexPathList__Element_dexFile;
   static jfieldID dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+  static jfieldID java_lang_Thread_parkBlocker;
   static jfieldID java_lang_Thread_daemon;
   static jfieldID java_lang_Thread_group;
   static jfieldID java_lang_Thread_lock;
   static jfieldID java_lang_Thread_name;
   static jfieldID java_lang_Thread_priority;
   static jfieldID java_lang_Thread_nativePeer;
+  static jfieldID java_lang_Thread_unparkedBeforeStart;
   static jfieldID java_lang_ThreadGroup_groups;
   static jfieldID java_lang_ThreadGroup_ngroups;
   static jfieldID java_lang_ThreadGroup_mainThreadGroup;
diff --git a/sigchainlib/Android.bp b/sigchainlib/Android.bp
index a151d7a..5f055ec 100644
--- a/sigchainlib/Android.bp
+++ b/sigchainlib/Android.bp
@@ -16,7 +16,6 @@
 
 cc_library {
     name: "libsigchain",
-    cpp_std: "gnu++17",
 
     host_supported: true,
     defaults: ["art_defaults"],
@@ -38,12 +37,6 @@
             whole_static_libs: ["libasync_safe"],
         },
     },
-    // Sigchainlib is whole-statically linked into binaries. For Android.mk-based binaries,
-    // this will drag ASAN symbols into the binary, even for modules using LOCAL_SANITIZE := never.
-    // So disable sanitization for now. b/38456126
-    sanitize: {
-        never: true,
-    },
 }
 
 // Create a dummy version of libsigchain which expose the necessary symbols
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index cbc3ff8..08ee690 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -100,7 +100,7 @@
 
 template<typename T>
 static void lookup_next_symbol(T* output, T wrapper, const char* name) {
-  void* sym = dlsym(RTLD_NEXT, name);
+  void* sym = dlsym(RTLD_NEXT, name);  // NOLINT glibc triggers cert-dcl16-c with RTLD_NEXT.
   if (sym == nullptr) {
     sym = dlsym(RTLD_DEFAULT, name);
     if (sym == wrapper || sym == sigaction) {
diff --git a/sigchainlib/sigchain_test.cc b/sigchainlib/sigchain_test.cc
index 53e1e40..bb99787 100644
--- a/sigchainlib/sigchain_test.cc
+++ b/sigchainlib/sigchain_test.cc
@@ -38,7 +38,7 @@
 #include "sigchain.h"
 
 #if !defined(__BIONIC__)
-typedef sigset_t sigset64_t;
+using sigset64_t = sigset_t;
 
 static int sigemptyset64(sigset64_t* set) {
   return sigemptyset(set);
diff --git a/simulator/Android.bp b/simulator/Android.bp
index 8690426..223c891 100644
--- a/simulator/Android.bp
+++ b/simulator/Android.bp
@@ -45,7 +45,7 @@
     shared_libs: [
         "libart",
         "libartbase",
-        "libvixl-arm64",
+        "libvixl",
     ],
 }
 
@@ -58,7 +58,7 @@
     shared_libs: [
         "libartd",
         "libartbased",
-        "libvixld-arm64",
+        "libvixld",
     ],
 }
 
diff --git a/simulator/code_simulator_container.cc b/simulator/code_simulator_container.cc
index 3206bc7..dc553df 100644
--- a/simulator/code_simulator_container.cc
+++ b/simulator/code_simulator_container.cc
@@ -34,13 +34,13 @@
   if (libart_simulator_handle_ == nullptr) {
     VLOG(simulator) << "Could not load " << libart_simulator_so_name << ": " << dlerror();
   } else {
-    typedef CodeSimulator* (*create_code_simulator_ptr_)(InstructionSet target_isa);
-    create_code_simulator_ptr_ create_code_simulator_ =
-        reinterpret_cast<create_code_simulator_ptr_>(
+    using CreateCodeSimulatorPtr = CodeSimulator*(*)(InstructionSet);
+    CreateCodeSimulatorPtr create_code_simulator =
+        reinterpret_cast<CreateCodeSimulatorPtr>(
             dlsym(libart_simulator_handle_, "CreateCodeSimulator"));
-    DCHECK(create_code_simulator_ != nullptr) << "Fail to find symbol of CreateCodeSimulator: "
+    DCHECK(create_code_simulator != nullptr) << "Fail to find symbol of CreateCodeSimulator: "
         << dlerror();
-    simulator_ = create_code_simulator_(target_isa);
+    simulator_ = create_code_simulator(target_isa);
   }
 }
 
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 33a8f5b..540e6ce 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -62,7 +62,7 @@
   int attach_result = jvm->AttachCurrentThread(&env, &args);
   CHECK_EQ(attach_result, 0);
 
-  typedef void (*Fn)(JNIEnv*);
+  using Fn = void(*)(JNIEnv*);
   Fn fn = reinterpret_cast<Fn>(arg);
   fn(env);
 
@@ -704,7 +704,7 @@
   }
 
  private:
-  void TestCalls(const char* declaring_class, std::vector<const char*> methods) {
+  void TestCalls(const char* declaring_class, const std::vector<const char*>& methods) {
     jmethodID new_method = env_->GetMethodID(concrete_class_, "<init>", "()V");
     jobject obj = env_->NewObject(concrete_class_, new_method);
     CHECK(!env_->ExceptionCheck());
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 1ce20e2..4c344a3 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -37,7 +37,7 @@
   explicit ReferenceMap2Visitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
       : CheckReferenceMapVisitor(thread) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (CheckReferenceMapVisitor::VisitFrame()) {
       return true;
     }
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 53e0dae..81c27ec 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -33,7 +33,7 @@
   explicit TestReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
       : CheckReferenceMapVisitor(thread) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (CheckReferenceMapVisitor::VisitFrame()) {
       return true;
     }
diff --git a/test/004-ThreadStress/src-art/Main.java b/test/004-ThreadStress/src-art/Main.java
index 3a89f4f..e717852 100644
--- a/test/004-ThreadStress/src-art/Main.java
+++ b/test/004-ThreadStress/src-art/Main.java
@@ -26,6 +26,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Semaphore;
+import java.util.concurrent.locks.LockSupport;
 
 // Run on host with:
 //   javac ThreadTest.java && java ThreadStress && rm *.class
@@ -52,6 +53,7 @@
 //    -sleep:X .......... frequency of Sleep (double)
 //    -wait:X ........... frequency of Wait (double)
 //    -timedwait:X ...... frequency of TimedWait (double)
+//    -timedpark:X ...... frequency of TimedPark (double)
 //    -syncandwork:X .... frequency of SyncAndWork (double)
 //    -queuedwait:X ..... frequency of QueuedWait (double)
 
@@ -251,6 +253,18 @@
         }
     }
 
+    private final static class TimedPark extends Operation {
+        private final static int SLEEP_TIME = 100;
+
+        public TimedPark() {}
+
+        @Override
+        public boolean perform() {
+            LockSupport.parkNanos(this, 100*1000000);
+            return true;
+        }
+    }
+
     private final static class SyncAndWork extends Operation {
         private final Object lock;
 
@@ -320,7 +334,8 @@
         frequencyMap.put(new NonMovingAlloc(), 0.025);        //   5/200
         frequencyMap.put(new StackTrace(), 0.1);              //  20/200
         frequencyMap.put(new Exit(), 0.225);                  //  45/200
-        frequencyMap.put(new Sleep(), 0.125);                 //  25/200
+        frequencyMap.put(new Sleep(), 0.075);                 //  15/200
+        frequencyMap.put(new TimedPark(), 0.05);              //  10/200
         frequencyMap.put(new TimedWait(lock), 0.05);          //  10/200
         frequencyMap.put(new Wait(lock), 0.075);              //  15/200
         frequencyMap.put(new QueuedWait(semaphore), 0.05);    //  10/200
@@ -341,9 +356,10 @@
     private final static Map<Operation, Double> createLockFrequencyMap(Object lock) {
       Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
       frequencyMap.put(new Sleep(), 0.2);                     //  40/200
-      frequencyMap.put(new TimedWait(lock), 0.2);             //  40/200
+      frequencyMap.put(new TimedWait(lock), 0.1);             //  20/200
       frequencyMap.put(new Wait(lock), 0.2);                  //  40/200
       frequencyMap.put(new SyncAndWork(lock), 0.4);           //  80/200
+      frequencyMap.put(new TimedPark(), 0.1);                 //  20/200
 
       return frequencyMap;
     }
@@ -389,6 +405,8 @@
             op = new Wait(lock);
         } else if (split[0].equals("-timedwait")) {
             op = new TimedWait(lock);
+        } else if (split[0].equals("-timedpark")) {
+            op = new TimedPark();
         } else if (split[0].equals("-syncandwork")) {
             op = new SyncAndWork(lock);
         } else if (split[0].equals("-queuedwait")) {
@@ -693,7 +711,7 @@
         }
 
         // The notifier thread is a daemon just loops forever to wake
-        // up threads in operation Wait.
+        // up threads in operations Wait and Park.
         if (lock != null) {
             Thread notifier = new Thread("Notifier") {
                 public void run() {
@@ -701,6 +719,11 @@
                         synchronized (lock) {
                             lock.notifyAll();
                         }
+                        for (Thread runner : runners) {
+                          if (runner != null) {
+                            LockSupport.unpark(runner);
+                          }
+                        }
                     }
                 }
             };
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index d43d374..9176e89 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -32,6 +32,20 @@
     }
   }
 
+  private static void check(float actual, float expected, String msg) {
+    if (actual != expected) {
+      System.out.println(msg + " : " + actual + " != " + expected);
+      System.exit(1);
+    }
+  }
+
+  private static void check(double actual, double expected, String msg) {
+    if (actual != expected) {
+      System.out.println(msg + " : " + actual + " != " + expected);
+      System.exit(1);
+    }
+  }
+
   private static void check(Object actual, Object expected, String msg) {
     if (actual != expected) {
       System.out.println(msg + " : " + actual + " != " + expected);
@@ -54,6 +68,7 @@
     testArrayIndexScale(unsafe);
     testGetAndPutAndCAS(unsafe);
     testGetAndPutVolatile(unsafe);
+    testCopyMemoryPrimitiveArrays(unsafe);
   }
 
   private static void testArrayBaseOffset(Unsafe unsafe) {
@@ -237,6 +252,38 @@
           "Unsafe.getObjectVolatile(Object, long)");
   }
 
+  // Regression test for "copyMemory" operations hitting a DCHECK() for float/double arrays.
+  private static void testCopyMemoryPrimitiveArrays(Unsafe unsafe) {
+    int size = 4 * 1024;
+    long memory = unsafeTestMalloc(size);
+
+    int floatSize = 4;
+    float[] inputFloats = new float[size / floatSize];
+    for (int i = 0; i != inputFloats.length; ++i) {
+      inputFloats[i] = ((float)i) + 0.5f;
+    }
+    float[] outputFloats = new float[size / floatSize];
+    unsafe.copyMemoryFromPrimitiveArray(inputFloats, 0, memory, size);
+    unsafe.copyMemoryToPrimitiveArray(memory, outputFloats, 0, size);
+    for (int i = 0; i != inputFloats.length; ++i) {
+      check(inputFloats[i], outputFloats[i], "unsafe.copyMemory/float");
+    }
+
+    int doubleSize = 8;
+    double[] inputDoubles = new double[size / doubleSize];
+    for (int i = 0; i != inputDoubles.length; ++i) {
+      inputDoubles[i] = ((double)i) + 0.5;
+    }
+    double[] outputDoubles = new double[size / doubleSize];
+    unsafe.copyMemoryFromPrimitiveArray(inputDoubles, 0, memory, size);
+    unsafe.copyMemoryToPrimitiveArray(memory, outputDoubles, 0, size);
+    for (int i = 0; i != inputDoubles.length; ++i) {
+      check(inputDoubles[i], outputDoubles[i], "unsafe.copyMemory/double");
+    }
+
+    unsafeTestFree(memory);
+  }
+
   private static class TestClass {
     public int intVar = 0;
     public long longVar = 0;
@@ -251,4 +298,6 @@
 
   private static native int vmArrayBaseOffset(Class<?> clazz);
   private static native int vmArrayIndexScale(Class<?> clazz);
+  private static native long unsafeTestMalloc(long size);
+  private static native void unsafeTestFree(long memory);
 }
diff --git a/test/004-UnsafeTest/unsafe_test.cc b/test/004-UnsafeTest/unsafe_test.cc
index 18d9ea8..e970aaa 100644
--- a/test/004-UnsafeTest/unsafe_test.cc
+++ b/test/004-UnsafeTest/unsafe_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "art_method-inl.h"
+#include "base/casts.h"
 #include "jni.h"
 #include "mirror/array.h"
 #include "mirror/class-inl.h"
@@ -37,4 +38,16 @@
   return Primitive::ComponentSize(klass->GetComponentType()->GetPrimitiveType());
 }
 
+extern "C" JNIEXPORT jlong JNICALL Java_Main_unsafeTestMalloc(JNIEnv*, jclass, jlong size) {
+  void* memory = malloc(dchecked_integral_cast<size_t>(size));
+  CHECK(memory != nullptr);
+  return reinterpret_cast64<jlong>(memory);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_unsafeTestFree(JNIEnv*, jclass, jlong memory) {
+  void* mem = reinterpret_cast64<void*>(memory);
+  CHECK(mem != nullptr);
+  free(mem);
+}
+
 }  // namespace art
diff --git a/test/004-checker-UnsafeTest18/src/Main.java b/test/004-checker-UnsafeTest18/src/Main.java
index 282f9ce..927d0da 100644
--- a/test/004-checker-UnsafeTest18/src/Main.java
+++ b/test/004-checker-UnsafeTest18/src/Main.java
@@ -47,21 +47,21 @@
   // Setters.
   //
 
-  /// CHECK-START: int Main.set32(java.lang.Object, long, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.set32(java.lang.Object, long, int) builder (after)
   /// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetInt
   /// CHECK-DAG:                 Return [<<Result>>]
   private static int set32(Object o, long offset, int newValue) {
     return unsafe.getAndSetInt(o, offset, newValue);
   }
 
-  /// CHECK-START: long Main.set64(java.lang.Object, long, long) intrinsics_recognition (after)
+  /// CHECK-START: long Main.set64(java.lang.Object, long, long) builder (after)
   /// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetLong
   /// CHECK-DAG:                 Return [<<Result>>]
   private static long set64(Object o, long offset, long newValue) {
     return unsafe.getAndSetLong(o, offset, newValue);
   }
 
-  /// CHECK-START: java.lang.Object Main.setObj(java.lang.Object, long, java.lang.Object) intrinsics_recognition (after)
+  /// CHECK-START: java.lang.Object Main.setObj(java.lang.Object, long, java.lang.Object) builder (after)
   /// CHECK-DAG: <<Result:l\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetObject
   /// CHECK-DAG:                 Return [<<Result>>]
   private static Object setObj(Object o, long offset, Object newValue) {
@@ -72,14 +72,14 @@
   // Adders.
   //
 
-  /// CHECK-START: int Main.add32(java.lang.Object, long, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.add32(java.lang.Object, long, int) builder (after)
   /// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddInt
   /// CHECK-DAG:                 Return [<<Result>>]
   private static int add32(Object o, long offset, int delta) {
     return unsafe.getAndAddInt(o, offset, delta);
   }
 
-  /// CHECK-START: long Main.add64(java.lang.Object, long, long) intrinsics_recognition (after)
+  /// CHECK-START: long Main.add64(java.lang.Object, long, long) builder (after)
   /// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddLong
   /// CHECK-DAG:                 Return [<<Result>>]
   private static long add64(Object o, long offset, long delta) {
@@ -90,7 +90,7 @@
   // Fences (native).
   //
 
-  /// CHECK-START: void Main.load() intrinsics_recognition (after)
+  /// CHECK-START: void Main.load() builder (after)
   /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeLoadFence
   //
   /// CHECK-START: void Main.load() instruction_simplifier (after)
@@ -102,7 +102,7 @@
     unsafe.loadFence();
   }
 
-  /// CHECK-START: void Main.store() intrinsics_recognition (after)
+  /// CHECK-START: void Main.store() builder (after)
   /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeStoreFence
   //
   /// CHECK-START: void Main.store() instruction_simplifier (after)
@@ -114,7 +114,7 @@
     unsafe.storeFence();
   }
 
-  /// CHECK-START: void Main.full() intrinsics_recognition (after)
+  /// CHECK-START: void Main.full() builder (after)
   /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeFullFence
   //
   /// CHECK-START: void Main.full() instruction_simplifier (after)
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index c713aa4..141a089 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -15,14 +15,13 @@
  */
 
 import junit.framework.Assert;
-import java.lang.reflect.Method;
 import java.util.Locale;
 
 /**
  * more string tests
  */
 public class Main {
-    public static void main(String args[]) throws Exception {
+    public static void main(String args[]) {
         String test = "0123456789";
         String test1 = new String("0123456789");    // different object
         String test2 = new String("0123456780");    // different value
@@ -86,9 +85,7 @@
         Assert.assertEquals("this is a path", test.replaceAll("/", " "));
         Assert.assertEquals("this is a path", test.replace("/", " "));
 
-        Class<?> Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
-        Method fromUTF8ByteArray = Strings.getDeclaredMethod("fromUTF8ByteArray", byte[].class);
-        String result = (String) fromUTF8ByteArray.invoke(null, new byte[] {'O', 'K'});
+        String result = new String(new char[] { 'O', 'K' });
         System.out.println(result);
 
         testCompareToAndEquals();
diff --git a/test/050-sync-test/src/Main.java b/test/050-sync-test/src/Main.java
index 734b51e..ba37818 100644
--- a/test/050-sync-test/src/Main.java
+++ b/test/050-sync-test/src/Main.java
@@ -133,11 +133,13 @@
 class SleepyThread extends Thread {
     private SleepyThread mOther;
     private Integer[] mWaitOnMe;      // any type of object will do
+    private volatile boolean otherDone;
 
     private static int count = 0;
 
     SleepyThread(SleepyThread other) {
         mOther = other;
+        otherDone = false;
         mWaitOnMe = new Integer[] { 1, 2 };
 
         setName("thread#" + count);
@@ -158,9 +160,11 @@
             boolean intr = false;
 
             try {
+              do {
                 synchronized (mWaitOnMe) {
                     mWaitOnMe.wait(9000);
                 }
+              } while (!otherDone);
             } catch (InterruptedException ie) {
                 // Expecting this; interrupted should be false.
                 System.out.println(Thread.currentThread().getName() +
@@ -182,6 +186,7 @@
             System.out.println("interrupting other (isAlive="
                 + mOther.isAlive() + ")");
             mOther.interrupt();
+            mOther.otherDone = true;
         }
     }
 }
diff --git a/test/089-many-methods/check b/test/089-many-methods/check
index 1f71e8e..e09a291 100755
--- a/test/089-many-methods/check
+++ b/test/089-many-methods/check
@@ -14,5 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-grep Error "$2" > "$2.tmp"
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
+EXPECTED_ERROR="Cannot fit requested classes in a single dex"
+if ! grep -q "$EXPECTED_ERROR" "$2"; then
+  exit 1
+else
+  exit 0
+fi
diff --git a/test/089-many-methods/expected.txt b/test/089-many-methods/expected.txt
index bb6ba3c..b75bde4 100644
--- a/test/089-many-methods/expected.txt
+++ b/test/089-many-methods/expected.txt
@@ -1 +1 @@
-Error: Cannot fit requested classes in a single dex file (# fields: 131000 > 65536)
+See the 'check' script for the expectation!
diff --git a/test/099-vmdebug/check b/test/099-vmdebug/check
index d124ce8..6a3fed5 100755
--- a/test/099-vmdebug/check
+++ b/test/099-vmdebug/check
@@ -15,6 +15,6 @@
 # limitations under the License.
 
 # Strip the process pids and line numbers from exact error messages.
-sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^.*dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
 
 diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/1001-app-image-regions/app_image_regions.cc b/test/1001-app-image-regions/app_image_regions.cc
new file mode 100644
index 0000000..dc16a84
--- /dev/null
+++ b/test/1001-app-image-regions/app_image_regions.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
+#include "gc/space/region_space.h"
+#include "image.h"
+#include "mirror/class.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+namespace {
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getRegionSize(JNIEnv*, jclass) {
+  return gc::space::RegionSpace::kRegionSize;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_checkAppImageSectionSize(JNIEnv*, jclass, jclass c) {
+  ScopedObjectAccess soa(Thread::Current());
+  ObjPtr<mirror::Class> klass_ptr = soa.Decode<mirror::Class>(c);
+  for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) {
+    if (space->IsImageSpace()) {
+      auto* image_space = space->AsImageSpace();
+      const auto& image_header = image_space->GetImageHeader();
+      if (image_header.IsAppImage() && image_space->HasAddress(klass_ptr.Ptr())) {
+        return image_header.GetObjectsSection().Size();
+      }
+    }
+  }
+  return 0;
+}
+
+}  // namespace
+
+}  // namespace art
diff --git a/test/1001-app-image-regions/build b/test/1001-app-image-regions/build
new file mode 100755
index 0000000..16c3a5b
--- /dev/null
+++ b/test/1001-app-image-regions/build
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+count=10000
+echo "LMain;" >> profile
+for i in $(seq 1 "$count"); do
+  echo "LOther\$Inner${i};" >> "profile"
+done
+
+# Generate the other class.
+other_file="src/Other.java"
+echo "class Other {" >> "${other_file}"
+for i in $(seq 1 "$count"); do
+  echo "  static class Inner${i} { void test(){} }" >> "${other_file}"
+done
+echo "}" >> "${other_file}"
+
+./default-build "$@"
diff --git a/test/1001-app-image-regions/expected.txt b/test/1001-app-image-regions/expected.txt
new file mode 100644
index 0000000..9fd87ca
--- /dev/null
+++ b/test/1001-app-image-regions/expected.txt
@@ -0,0 +1,4 @@
+JNI_OnLoad called
+App image loaded true
+Region size 262144
+App image section size large enough true
diff --git a/test/1001-app-image-regions/info.txt b/test/1001-app-image-regions/info.txt
new file mode 100644
index 0000000..b13f276
--- /dev/null
+++ b/test/1001-app-image-regions/info.txt
@@ -0,0 +1 @@
+Tests that an app image with many classes is generated and loaded correctly.
diff --git a/test/1001-app-image-regions/run b/test/1001-app-image-regions/run
new file mode 100644
index 0000000..128aa2e
--- /dev/null
+++ b/test/1001-app-image-regions/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile
diff --git a/test/1001-app-image-regions/src/Main.java b/test/1001-app-image-regions/src/Main.java
new file mode 100644
index 0000000..c41a606
--- /dev/null
+++ b/test/1001-app-image-regions/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+class Main {
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    System.out.println("App image loaded " + checkAppImageLoaded());
+    int regionSize = getRegionSize();
+    int objectsSectionSize = checkAppImageSectionSize(Main.class);
+    System.out.println("Region size " + regionSize);
+    System.out.println("App image section size large enough " + (objectsSectionSize > regionSize));
+    if (objectsSectionSize <= regionSize) {
+      System.out.println("Section size " + objectsSectionSize);
+    }
+  }
+
+  public static native boolean checkAppImageLoaded();
+  public static native int getRegionSize();
+  public static native int checkAppImageSectionSize(Class c);
+}
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index a74f763..cc7e806 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -45,7 +45,7 @@
 
 static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
   JNIEnv* env = nullptr;
-  typedef jint (*FnPtr_t)(JavaVM*, void*);
+  using FnPtr_t = jint(*)(JavaVM*, void*);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("JNI_OnLoad")->fnPtr);
 
   vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
@@ -91,9 +91,8 @@
   return fnPtr(vm, reserved);
 }
 
-static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env,
-                                                                     jclass klass) {
-  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env, jclass klass) {
+  using FnPtr_t = void(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testFindClassOnAttachedNativeThread")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -102,7 +101,7 @@
 
 static void trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv* env,
                                                                            jclass klass) {
-  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  using FnPtr_t = void(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testFindFieldOnAttachedNativeThreadNative")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -111,7 +110,7 @@
 
 static void trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
                                                                           jclass klass) {
-  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  using FnPtr_t = void(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testCallStaticVoidMethodOnSubClassNative")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -119,7 +118,7 @@
 }
 
 static jobject trampoline_Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass klass) {
-  typedef jobject (*FnPtr_t)(JNIEnv*, jclass);
+  using FnPtr_t = jobject(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testGetMirandaMethodNative")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -127,7 +126,7 @@
 }
 
 static void trampoline_Java_Main_testNewStringObject(JNIEnv* env, jclass klass) {
-  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  using FnPtr_t = void(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testNewStringObject")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -135,7 +134,7 @@
 }
 
 static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
-  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  using FnPtr_t = void(*)(JNIEnv*, jclass);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
     (find_native_bridge_method("testZeroLengthByteBuffers")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -145,8 +144,8 @@
 static jbyte trampoline_Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
                                              jbyte b3, jbyte b4, jbyte b5, jbyte b6,
                                              jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
-  typedef jbyte (*FnPtr_t)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte,
-                           jbyte, jbyte, jbyte, jbyte, jbyte);
+  using FnPtr_t = jbyte(*)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte,
+                           jbyte, jbyte);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("byteMethod")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
   return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
@@ -155,8 +154,8 @@
 static jshort trampoline_Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
                                                jshort s3, jshort s4, jshort s5, jshort s6,
                                                jshort s7, jshort s8, jshort s9, jshort s10) {
-  typedef jshort (*FnPtr_t)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort,
-                            jshort, jshort, jshort, jshort, jshort);
+  using FnPtr_t = jshort(*)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort, jshort, jshort,
+                            jshort, jshort, jshort);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("shortMethod")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
   return fnPtr(env, klass, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10);
@@ -166,7 +165,7 @@
                                                    jboolean b2, jboolean b3, jboolean b4,
                                                    jboolean b5, jboolean b6, jboolean b7,
                                                    jboolean b8, jboolean b9, jboolean b10) {
-  typedef jboolean (*FnPtr_t)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
+  using FnPtr_t = jboolean(*)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
                               jboolean, jboolean, jboolean, jboolean, jboolean);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("booleanMethod")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
@@ -176,8 +175,8 @@
 static jchar trampoline_Java_Main_charMethod(JNIEnv* env, jclass klass, jchar c1, jchar c2,
                                              jchar c3, jchar c4, jchar c5, jchar c6,
                                              jchar c7, jchar c8, jchar c9, jchar c10) {
-  typedef jchar (*FnPtr_t)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar,
-                           jchar, jchar, jchar, jchar, jchar);
+  using FnPtr_t = jchar(*)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar, jchar, jchar, jchar,
+                           jchar, jchar);
   FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("charMethod")->fnPtr);
   printf("%s called!\n", __FUNCTION__);
   return fnPtr(env, klass, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
deleted file mode 100644
index 7a24e31..0000000
--- a/test/117-nopatchoat/expected.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-JNI_OnLoad called
-Has oat is true, has executable oat is expected.
-This is a function call
diff --git a/test/117-nopatchoat/info.txt b/test/117-nopatchoat/info.txt
deleted file mode 100644
index aa9f57c..0000000
--- a/test/117-nopatchoat/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Test that disables patchoat'ing the application.
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
deleted file mode 100644
index c673dd7..0000000
--- a/test/117-nopatchoat/nopatchoat.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "class_linker.h"
-#include "dex/dex_file-inl.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "mirror/class-inl.h"
-#include "oat_file.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-class NoPatchoatTest {
- public:
-  static const OatDexFile* getOatDexFile(jclass cls) {
-    ScopedObjectAccess soa(Thread::Current());
-    ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
-    const DexFile& dex_file = klass->GetDexFile();
-    return dex_file.GetOatDexFile();
-  }
-
-  static bool isRelocationDeltaZero() {
-    std::vector<gc::space::ImageSpace*> spaces =
-        Runtime::Current()->GetHeap()->GetBootImageSpaces();
-    return !spaces.empty() && spaces[0]->GetImageHeader().GetPatchDelta() == 0;
-  }
-
-  static bool hasExecutableOat(jclass cls) {
-    const OatDexFile* oat_dex_file = getOatDexFile(cls);
-
-    return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
-  }
-};
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isRelocationDeltaZero(JNIEnv*, jclass) {
-  return NoPatchoatTest::isRelocationDeltaZero();
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
-  return NoPatchoatTest::hasExecutableOat(cls);
-}
-
-}  // namespace art
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
deleted file mode 100755
index 4c33f7a..0000000
--- a/test/117-nopatchoat/run
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ensure flags includes prebuild and relocate. It doesn't make sense unless we
-# have a oat file we want to relocate.
-flags="$@"
-
-# This test is supposed to test with oat files. Make sure that the no-prebuild flag isn't set,
-# or complain.
-# Note: prebuild is the default.
-if [[ "${flags}" == *--no-prebuild* ]] ; then
-  echo "Test 117-nopatchoat is not intended to run in no-prebuild mode."
-  exit 1
-fi
-
-# This test is supposed to test relocation. Make sure that the no-relocate flag isn't set,
-# or complain.
-# Note: relocate is the default.
-if [[ "${flags}" == *--no-relocate* ]] ; then
-  echo "Test 117-nopatchoat is not intended to run in no-relocate mode."
-  exit 1
-fi
-
-${RUN} ${flags}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
deleted file mode 100644
index dfb98b0..0000000
--- a/test/117-nopatchoat/src/Main.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-  public static void main(String[] args) {
-    System.loadLibrary(args[0]);
-
-    boolean executable_correct = hasExecutableOat();
-
-    System.out.println(
-        "Has oat is " + hasOatFile() + ", has executable oat is " + (
-        executable_correct ? "expected" : "not expected") + ".");
-
-    System.out.println(functionCall());
-  }
-
-  public static String functionCall() {
-    String arr[] = {"This", "is", "a", "function", "call"};
-    String ret = "";
-    for (int i = 0; i < arr.length; i++) {
-      ret = ret + arr[i] + " ";
-    }
-    return ret.substring(0, ret.length() - 1);
-  }
-
-  private native static boolean hasOatFile();
-
-  private native static boolean hasExecutableOat();
-
-  private native static boolean isRelocationDeltaZero();
-}
diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run
index d68b0a0..d1b9725 100644
--- a/test/118-noimage-dex2oat/run
+++ b/test/118-noimage-dex2oat/run
@@ -31,39 +31,26 @@
   exit 1
 fi
 
-if [[ $@ == *--host* ]]; then
-    framework="${ANDROID_HOST_OUT}/framework"
-    bpath_suffix="-hostdex"
-else
-    framework="/system/framework"
-    bpath_suffix=""
-fi
-bpath="${framework}/core-libart${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
-bpath_arg="--runtime-option -Xbootclasspath:${bpath}"
-
 
 # Make sure we can run without an oat file.
 echo "Run -Xnoimage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat
+${RUN} ${flags} --runtime-option -Xnoimage-dex2oat
 return_status1=$?
 
 # Make sure we cannot run without an oat file without fallback.
 echo "Run -Xnoimage-dex2oat -Xno-dex-file-fallback"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat \
+${RUN} ${flags} --runtime-option -Xnoimage-dex2oat \
   --runtime-option -Xno-dex-file-fallback
 return_status2=$?
 
 # Make sure we can run with the oat file.
 echo "Run -Ximage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Ximage-dex2oat
+${RUN} ${flags} --runtime-option -Ximage-dex2oat
 return_status3=$?
 
 # Make sure we can run with the default settings.
 echo "Run default"
-${RUN} ${flags} ${bpath_arg}
+${RUN} ${flags}
 return_status4=$?
 
 # Make sure we don't silently ignore an early failure.
diff --git a/test/119-noimage-patchoat/check b/test/119-noimage-patchoat/check
deleted file mode 100755
index d124ce8..0000000
--- a/test/119-noimage-patchoat/check
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Strip the process pids and line numbers from exact error messages.
-sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/119-noimage-patchoat/expected.txt b/test/119-noimage-patchoat/expected.txt
deleted file mode 100644
index 9b9db58..0000000
--- a/test/119-noimage-patchoat/expected.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false
-JNI_OnLoad called
-Has image is false, is image dex2oat enabled is false.
-Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false -Xno-dex-file-fallback
-Failed to initialize runtime (check log for details)
-Run -Ximage-dex2oat
-JNI_OnLoad called
-Has image is true, is image dex2oat enabled is true.
-Run default
-JNI_OnLoad called
-Has image is true, is image dex2oat enabled is true.
diff --git a/test/119-noimage-patchoat/info.txt b/test/119-noimage-patchoat/info.txt
deleted file mode 100644
index 6b85368..0000000
--- a/test/119-noimage-patchoat/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Test that disables patchoat'ing the image.
diff --git a/test/119-noimage-patchoat/run b/test/119-noimage-patchoat/run
deleted file mode 100644
index 497dc4a..0000000
--- a/test/119-noimage-patchoat/run
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-flags="$@"
-
-# Force relocation otherwise we will just use the already created core.oat/art pair.
-# Note: relocate is the default.
-if [[ "${flags}" == *--no-relocate* ]] ; then
-  echo "Test 119-noimage-patchoat is not intended to run in no-relocate mode."
-  exit 1
-fi
-
-if [[ $@ == *--host* ]]; then
-  false_bin="/bin/false"
-else
-  false_bin="/system/bin/false"
-fi
-
-# Make sure we can run without an image file.
-echo "Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false"
-${RUN} ${flags} ${BPATH} --runtime-option -Xnoimage-dex2oat \
-  --runtime-option -Xpatchoat:${false_bin}
-return_status1=$?
-
-# Make sure we cannot run without an image file without fallback.
-echo "Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false -Xno-dex-file-fallback"
-${RUN} ${flags} ${BPATH} --runtime-option -Xnoimage-dex2oat \
-  --runtime-option -Xpatchoat:${false_bin} --runtime-option -Xno-dex-file-fallback
-# This second run is expected to fail: invert the return status of the previous command.
-return_status2=$((! $?))
-
-# Make sure we can run with the image file.
-echo "Run -Ximage-dex2oat"
-${RUN} ${flags} ${BPATH} --runtime-option -Ximage-dex2oat
-return_status3=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
-${RUN} ${flags} ${BPATH}
-return_status4=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3) && (exit $return_status4)
diff --git a/test/119-noimage-patchoat/src/Main.java b/test/119-noimage-patchoat/src/Main.java
deleted file mode 100644
index 6a70f58..0000000
--- a/test/119-noimage-patchoat/src/Main.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-  public static void main(String[] args) {
-    System.loadLibrary(args[0]);
-    boolean hasImage = hasImage();
-    System.out.println(
-        "Has image is " + hasImage + ", is image dex2oat enabled is "
-        + isImageDex2OatEnabled() + ".");
-
-    if (hasImage && !isImageDex2OatEnabled()) {
-      throw new Error("Image with dex2oat disabled runs with an oat file");
-    } else if (!hasImage && isImageDex2OatEnabled()) {
-      throw new Error("Image with dex2oat enabled runs without an oat file");
-    }
-  }
-
-  private native static boolean hasImage();
-
-  private native static boolean isImageDex2OatEnabled();
-}
diff --git a/test/1339-dead-reference-safe/check b/test/1339-dead-reference-safe/check
new file mode 100644
index 0000000..795cfac
--- /dev/null
+++ b/test/1339-dead-reference-safe/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DeadReferenceSafe result differs for interpreted mode. A real failure
+# will produce an extra line anyway.
+
+diff --ignore-matching-lines="DeadReferenceSafe count:" -q $1 $2
diff --git a/test/1339-dead-reference-safe/expected.txt b/test/1339-dead-reference-safe/expected.txt
new file mode 100644
index 0000000..abafce4
--- /dev/null
+++ b/test/1339-dead-reference-safe/expected.txt
@@ -0,0 +1,6 @@
+JNI_OnLoad called
+DeadReferenceUnsafe count: 5
+DeadReferenceSafe count: N
+ReachabilitySensitive count: 5
+ReachabilitySensitiveFun count: 5
+ReachabilityFence count: 5
diff --git a/test/1339-dead-reference-safe/info.txt b/test/1339-dead-reference-safe/info.txt
new file mode 100644
index 0000000..b6ad217
--- /dev/null
+++ b/test/1339-dead-reference-safe/info.txt
@@ -0,0 +1 @@
+Test that @DeadReferenceSafe and @ReachabilitySensitive have the intended effect.
diff --git a/test/1339-dead-reference-safe/src/DeadReferenceSafeTest.java b/test/1339-dead-reference-safe/src/DeadReferenceSafeTest.java
new file mode 100644
index 0000000..0c19084
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/DeadReferenceSafeTest.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.annotation.optimization.DeadReferenceSafe;
+import java.util.concurrent.atomic.AtomicInteger;
+
+@DeadReferenceSafe
+public final class DeadReferenceSafeTest {
+  static AtomicInteger nFinalized = new AtomicInteger(0);
+  private static final int INNER_ITERS = 10;
+  static int count;
+  static boolean interpreted;
+  int n = 1;
+
+  private static void $noinline$loop() {
+    DeadReferenceSafeTest x;
+    // The loop allocates INNER_ITERS DeadReferenceSafeTest objects.
+    for (int i = 0; i < INNER_ITERS; ++i) {
+      // We've allocated i objects so far.
+      x = new DeadReferenceSafeTest();
+      count += x.n;
+      // x is dead here.
+      if (i == 5) {
+        // With dead reference elimination, all 6 objects should have been finalized here.
+        // However the interpreter doesn't (yet?) play by the proper rules.
+        Main.$noinline$gcAndCheck(nFinalized, (interpreted ? 5 : 6), "DeadReferenceSafe",
+            "Failed to reclaim dead reference in @DeadReferenceSafe code!");
+      }
+    }
+  }
+
+  private static void reset(int expected_count) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    if (nFinalized.get() != expected_count) {
+      System.out.println("DeadReferenceSafeTest: Wrong number of finalized objects:"
+                         + nFinalized.get());
+    }
+    nFinalized.set(0);
+  }
+
+  protected void finalize() {
+    nFinalized.incrementAndGet();
+  }
+
+  public static void runTest() {
+    try {
+      interpreted = !Main.ensureCompiled(DeadReferenceSafeTest.class, "$noinline$loop");
+    } catch (NoSuchMethodException e) {
+      System.out.println("Unexpectedly threw " + e);
+    }
+
+    $noinline$loop();
+
+    if (count != INNER_ITERS) {
+      System.out.println("DeadReferenceSafeTest: Final count wrong: " + count);
+    }
+    reset(INNER_ITERS);
+  }
+}
diff --git a/test/1339-dead-reference-safe/src/DeadReferenceUnsafeTest.java b/test/1339-dead-reference-safe/src/DeadReferenceUnsafeTest.java
new file mode 100644
index 0000000..84774da
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/DeadReferenceUnsafeTest.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+public final class DeadReferenceUnsafeTest {
+  static AtomicInteger nFinalized = new AtomicInteger(0);
+  private static final int INNER_ITERS = 10;
+  static int count;
+  int n = 1;
+
+  private static void $noinline$loop() {
+    DeadReferenceUnsafeTest x;
+    // The loop allocates INNER_ITERS DeadReferenceUnsafeTest objects.
+    for (int i = 0; i < INNER_ITERS; ++i) {
+      // We've allocated i objects so far.
+      x = new DeadReferenceUnsafeTest();
+      count += x.n;
+      // x is dead here.
+      if (i == 5) {
+        // Without dead reference elimination, the last object should be kept around,
+        // and only 5 objects should be reclaimed here.
+        Main.$noinline$gcAndCheck(nFinalized, 5, "DeadReferenceUnsafe",
+            "Failed to keep dead reference live in unannotated code!");
+      }
+    }
+  }
+
+  private static void reset(int expected_count) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    if (nFinalized.get() != expected_count) {
+      System.out.println("DeadReferenceUnsafeTest: Wrong number of finalized objects:"
+                         + nFinalized.get());
+    }
+    nFinalized.set(0);
+  }
+
+  protected void finalize() {
+    nFinalized.incrementAndGet();
+  }
+
+  public static void runTest() {
+    try {
+      Main.ensureCompiled(DeadReferenceUnsafeTest.class, "$noinline$loop");
+    } catch (NoSuchMethodException e) {
+      System.out.println("Unexpectedly threw " + e);
+    }
+
+    $noinline$loop();
+
+    if (count != INNER_ITERS) {
+      System.out.println("DeadReferenceUnsafeTest: Final count wrong: " + count);
+    }
+    reset(INNER_ITERS);
+  }
+}
diff --git a/test/1339-dead-reference-safe/src/Main.java b/test/1339-dead-reference-safe/src/Main.java
new file mode 100644
index 0000000..46b533a
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class Main {
+
+  // Ensure that the "loop" method is compiled. Otherwise we currently have no real way to get rid
+  // of dead references. Return true if it looks like we succeeded.
+  public static boolean ensureCompiled(Class cls, String methodName) throws NoSuchMethodException {
+    Method m = cls.getDeclaredMethod(methodName);
+    if (isAotCompiled(cls, methodName)) {
+      return true;
+    } else {
+      ensureMethodJitCompiled(m);
+      if (hasJitCompiledEntrypoint(cls, methodName)) {
+        return true;
+      }
+      return false;
+    }
+  }
+
+  // Garbage collect and check that the atomic counter has the expected value.
+  // Expected value of -1 means don't care.
+  // Noinline because we don't want the inlining here to interfere with the ReachabilitySensitive
+  // analysis.
+  public static void $noinline$gcAndCheck(AtomicInteger counter, int expected, String label,
+                                          String msg) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    int count = counter.get();
+    System.out.println(label + " count: " + count);
+    if (counter.get() != expected && expected != -1) {
+      System.out.println(msg);
+    }
+  }
+
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    // Run several variations of the same test with different reachability annotations, etc.
+    // Only the DeadReferenceSafeTest should finalize every previously allocated object.
+    DeadReferenceUnsafeTest.runTest();
+    DeadReferenceSafeTest.runTest();
+    ReachabilitySensitiveTest.runTest();
+    ReachabilitySensitiveFunTest.runTest();
+    ReachabilityFenceTest.runTest();
+  }
+  public static native void ensureMethodJitCompiled(Method meth);
+  public static native boolean hasJitCompiledEntrypoint(Class<?> cls, String methodName);
+  public static native boolean isAotCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/1339-dead-reference-safe/src/ReachabilityFenceTest.java b/test/1339-dead-reference-safe/src/ReachabilityFenceTest.java
new file mode 100644
index 0000000..d4befde
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/ReachabilityFenceTest.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DeadReferenceSafeTest, but with a reachabilityFence.
+
+import dalvik.annotation.optimization.DeadReferenceSafe;
+import java.lang.ref.Reference;
+import java.util.concurrent.atomic.AtomicInteger;
+
+@DeadReferenceSafe
+public final class ReachabilityFenceTest {
+  static AtomicInteger nFinalized = new AtomicInteger(0);
+  private static final int INNER_ITERS = 10;
+  static int count;
+  int n = 1;
+
+  private static void $noinline$loop() {
+    ReachabilityFenceTest x;
+    // Each loop allocates INNER_ITERS ReachabilityFenceTest objects.
+    for (int i = 0; i < INNER_ITERS; ++i) {
+      // We've allocated i objects so far.
+      x = new ReachabilityFenceTest();
+      count += x.n;
+      // x is dead here.
+      if (i == 5) {
+        // The reachabilityFence should keep the last allocated object reachable.
+        // Thus the last instance should not be finalized.
+        Main.$noinline$gcAndCheck(nFinalized, 5, "ReachabilityFence",
+            "reachabilityFence failed to keep object live.");
+      }
+      Reference.reachabilityFence(x);
+    }
+  }
+
+  private static void reset(int expected_count) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    if (nFinalized.get() != expected_count) {
+      System.out.println("ReachabilityFenceTest: Wrong number of finalized objects:"
+                         + nFinalized.get());
+    }
+    nFinalized.set(0);
+  }
+
+  protected void finalize() {
+    nFinalized.incrementAndGet();
+  }
+
+  public static void runTest() {
+    try {
+      Main.ensureCompiled(ReachabilityFenceTest.class, "$noinline$loop");
+    } catch (NoSuchMethodException e) {
+      System.out.println("Unexpectedly threw " + e);
+    }
+
+    $noinline$loop();
+
+    if (count != INNER_ITERS) {
+      System.out.println("ReachabilityFenceTest: Final count wrong: " + count);
+    }
+    reset(INNER_ITERS);
+  }
+}
diff --git a/test/1339-dead-reference-safe/src/ReachabilitySensitiveFunTest.java b/test/1339-dead-reference-safe/src/ReachabilitySensitiveFunTest.java
new file mode 100644
index 0000000..2c66146
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/ReachabilitySensitiveFunTest.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DeadReferenceSafeTest, but with a ReachabilitySensitive annotation.
+
+import dalvik.annotation.optimization.DeadReferenceSafe;
+import dalvik.annotation.optimization.ReachabilitySensitive;
+import java.util.concurrent.atomic.AtomicInteger;
+
+@DeadReferenceSafe
+public final class ReachabilitySensitiveFunTest {
+  static AtomicInteger nFinalized = new AtomicInteger(0);
+  private static final int INNER_ITERS = 10;
+  static int count;
+  int n = 1;
+  @ReachabilitySensitive
+  int getN() {
+    return n;
+  }
+
+  private static void $noinline$loop() {
+    ReachabilitySensitiveFunTest x;
+    // The loop allocates INNER_ITERS ReachabilitySensitiveFunTest objects.
+    for (int i = 0; i < INNER_ITERS; ++i) {
+      // We've allocated i objects so far.
+      x = new ReachabilitySensitiveFunTest();
+      // ReachabilitySensitive reference.
+      count += x.getN();
+      // x is dead here.
+      if (i == 5) {
+        // Since there is a ReachabilitySensitive call, x should be kept live
+        // until it is reassigned. Thus the last instance should not be finalized.
+        Main.$noinline$gcAndCheck(nFinalized, 5, "ReachabilitySensitiveFun",
+            "@ReachabilitySensitive call failed to keep object live.");
+      }
+    }
+  }
+
+  private static void reset(int expected_count) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    if (nFinalized.get() != expected_count) {
+      System.out.println("ReachabilitySensitiveFunTest: Wrong number of finalized objects:"
+                         + nFinalized.get());
+    }
+    nFinalized.set(0);
+  }
+
+  protected void finalize() {
+    nFinalized.incrementAndGet();
+  }
+
+  public static void runTest() {
+    try {
+      Main.ensureCompiled(ReachabilitySensitiveFunTest.class, "$noinline$loop");
+    } catch (NoSuchMethodException e) {
+      System.out.println("Unexpectedly threw " + e);
+    }
+
+    $noinline$loop();
+
+    if (count != INNER_ITERS) {
+      System.out.println("ReachabilitySensitiveFunTest: Final count wrong: " + count);
+    }
+    reset(INNER_ITERS);
+  }
+}
diff --git a/test/1339-dead-reference-safe/src/ReachabilitySensitiveTest.java b/test/1339-dead-reference-safe/src/ReachabilitySensitiveTest.java
new file mode 100644
index 0000000..aff43b6
--- /dev/null
+++ b/test/1339-dead-reference-safe/src/ReachabilitySensitiveTest.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DeadReferenceSafeTest, but with a ReachabilitySensitive annotation.
+
+import dalvik.annotation.optimization.DeadReferenceSafe;
+import dalvik.annotation.optimization.ReachabilitySensitive;
+import java.util.concurrent.atomic.AtomicInteger;
+
+@DeadReferenceSafe
+public final class ReachabilitySensitiveTest {
+  static AtomicInteger nFinalized = new AtomicInteger(0);
+  private static final int INNER_ITERS = 10;
+  static int count;
+  @ReachabilitySensitive
+  int n = 1;
+
+  private static void $noinline$loop() {
+    ReachabilitySensitiveTest x;
+    // The loop allocates INNER_ITERS ReachabilitySensitiveTest objects.
+    for (int i = 0; i < INNER_ITERS; ++i) {
+      // We've allocated i objects so far.
+      x = new ReachabilitySensitiveTest();
+      // ReachabilitySensitive reference.
+      count += x.n;
+      // x is dead here.
+      if (i == 5) {
+        // Since there is a ReachabilitySensitive reference to x.n, x should be kept live
+        // until it is reassigned. Thus the last instance should not be finalized.
+        Main.$noinline$gcAndCheck(nFinalized, 5, "ReachabilitySensitive",
+            "@ReachabilitySensitive failed to keep object live.");
+      }
+    }
+  }
+
+  private static void reset(int expected_count) {
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    if (nFinalized.get() != expected_count) {
+      System.out.println("ReachabilitySensitiveTest: Wrong number of finalized objects:"
+                         + nFinalized.get());
+    }
+    nFinalized.set(0);
+  }
+
+  protected void finalize() {
+    nFinalized.incrementAndGet();
+  }
+
+  public static void runTest() {
+    try {
+      Main.ensureCompiled(ReachabilitySensitiveTest.class, "$noinline$loop");
+    } catch (NoSuchMethodException e) {
+      System.out.println("Unexpectedly threw " + e);
+    }
+
+    $noinline$loop();
+
+    if (count != INNER_ITERS) {
+      System.out.println("ReachabilitySensitiveTest: Final count wrong: " + count);
+    }
+    reset(INNER_ITERS);
+  }
+}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 985d273..0cb220e 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -30,6 +30,7 @@
 #include <backtrace/Backtrace.h>
 
 #include "base/file_utils.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/utils.h"
 #include "gc/heap.h"
diff --git a/test/143-string-value/check b/test/143-string-value/check
index 2a3476c..b5e51ce 100755
--- a/test/143-string-value/check
+++ b/test/143-string-value/check
@@ -15,6 +15,6 @@
 # limitations under the License.
 
 # Strip error log messages.
-sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^.*dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
 
 diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/160-read-barrier-stress/src/Main.java b/test/160-read-barrier-stress/src/Main.java
index 5865094..5e49e66 100644
--- a/test/160-read-barrier-stress/src/Main.java
+++ b/test/160-read-barrier-stress/src/Main.java
@@ -121,6 +121,23 @@
             assertSameObject(f4444, la[i4444]);
             assertDifferentObject(f4999, la[i4998]);
             assertSameObject(f4999, la[i4999]);
+
+            la = largeArray;
+            // Group the ArrayGets so they aren't divided by a function call; this will enable
+            // intermediate address sharing for arm64.
+            Object tmp1 = la[i0];
+            Object tmp2 = la[i0 + 1];
+            Object tmp3 = la[i0 + 1024];
+            Object tmp4 = la[i0 + 4444];
+            Object tmp5 = la[i0 + 4998];
+            Object tmp6 = la[i0 + 4999];
+
+            assertSameObject(f0000, tmp1);
+            assertDifferentObject(f0000, tmp2);
+            assertSameObject(f1024, tmp3);
+            assertSameObject(f4444, tmp4);
+            assertDifferentObject(f4999, tmp5);
+            assertSameObject(f4999, tmp6);
         }
     }
 
diff --git a/test/174-escaping-instance-of-bad-class/expected.txt b/test/174-escaping-instance-of-bad-class/expected.txt
index e287759..611d698 100644
--- a/test/174-escaping-instance-of-bad-class/expected.txt
+++ b/test/174-escaping-instance-of-bad-class/expected.txt
@@ -1,6 +1,8 @@
 Bad.foo()
 Bad.instanceValue = 33
 Caught NoClassDefFoundError.
+Bad.bar()
+Caught NoClassDefFoundError.
 BadSuper.foo()
 BadSuper.superInstanceValue = 1
 Caught NoClassDefFoundError.
diff --git a/test/174-escaping-instance-of-bad-class/src/Main.java b/test/174-escaping-instance-of-bad-class/src/Main.java
index 4346152..4f66a31 100644
--- a/test/174-escaping-instance-of-bad-class/src/Main.java
+++ b/test/174-escaping-instance-of-bad-class/src/Main.java
@@ -39,6 +39,17 @@
         ncdfe.printStackTrace();
       }
     }
+    // Call bar() on the escaped instance of Bad.
+    try {
+      bad.bar();
+    } catch (NoClassDefFoundError ncdfe) {
+      // On RI, the NCDFE has no cause. On ART, the badClinit is the cause.
+      if (ncdfe.getCause() == badClinit || ncdfe.getCause() == null) {
+        System.out.println("Caught NoClassDefFoundError.");
+      } else {
+        ncdfe.printStackTrace();
+      }
+    }
   }
 
   public static void hierarchyTest() {
@@ -117,9 +128,19 @@
     System.out.println("Bad.instanceValue = " + instanceValue);
     System.out.println("Bad.staticValue = " + staticValue);
   }
+  public void bar() {
+    System.out.println("Bad.bar()");
+    System.out.println("Bad.staticValue [indirect] = " + Helper.$inline$getBadStaticValue());
+  }
   public Bad(int iv) { instanceValue = iv; }
   public int instanceValue;
   public static int staticValue;
+
+  public static class Helper {
+    public static int $inline$getBadStaticValue() {
+      return Bad.staticValue;
+    }
+  }
 }
 
 class BadSuper {
diff --git a/test/175-alloc-big-bignums/expected.txt b/test/175-alloc-big-bignums/expected.txt
new file mode 100644
index 0000000..f75da10
--- /dev/null
+++ b/test/175-alloc-big-bignums/expected.txt
@@ -0,0 +1 @@
+Test complete
diff --git a/test/175-alloc-big-bignums/info.txt b/test/175-alloc-big-bignums/info.txt
new file mode 100644
index 0000000..8f6bcc3
--- /dev/null
+++ b/test/175-alloc-big-bignums/info.txt
@@ -0,0 +1,11 @@
+Allocate large numbers of huge BigIntegers in rapid succession. Most of the
+associated memory will be in the C++ heap. This makes sure that we trigger
+the garbage collector often enough to prevent us from running out of memory.
+
+The test allocates roughly 10GB of native memory, approximately 1MB of which
+will be live at any point. Basically all native memory deallocation is
+triggered by Java garbage collection.
+
+This test is a lot nastier than it looks. In particular, failure on target tends
+to exhaust device memory, and kill off all processes on the device, including the
+adb daemon :-( .
diff --git a/test/175-alloc-big-bignums/src/Main.java b/test/175-alloc-big-bignums/src/Main.java
new file mode 100644
index 0000000..5fbeb46
--- /dev/null
+++ b/test/175-alloc-big-bignums/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigInteger;
+
+// This is motivated by the assumption that BigInteger allocates malloc memory
+// underneath. That's true (in 2018) on Android.
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    final int nIters = 20_000;  // Presumed < 1_000_000.
+    final BigInteger big2_20 = BigInteger.valueOf(1024*1024); // 2^20
+    BigInteger huge = BigInteger.valueOf(1).shiftLeft(4_000_000);  // ~0.5MB
+    for (int i = 0; i < nIters; ++i) {  // 10 GB total
+      huge = huge.add(BigInteger.ONE);
+    }
+    if (huge.bitLength() != 4_000_001) {
+      System.out.println("Wrong answer length: " + huge.bitLength());
+    } else if (huge.mod(big2_20).compareTo(BigInteger.valueOf(nIters)) != 0) {
+      System.out.println("Wrong answer: ..." + huge.mod(big2_20));
+    } else {
+      System.out.println("Test complete");
+    }
+  }
+}
diff --git a/test/1900-track-alloc/alloc.cc b/test/1900-track-alloc/alloc.cc
index db5617c..f209611 100644
--- a/test/1900-track-alloc/alloc.cc
+++ b/test/1900-track-alloc/alloc.cc
@@ -24,7 +24,7 @@
 namespace art {
 namespace Test1900TrackAlloc {
 
-typedef jvmtiError (*GetGlobalState)(jvmtiEnv* env, jlong* allocated);
+using GetGlobalState = jvmtiError(*)(jvmtiEnv* env, jlong* allocated);
 
 struct AllocTrackingData {
   GetGlobalState get_global_state;
diff --git a/test/1919-vminit-thread-start-timing/src/art/Test1919.java b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
index 3d5c079..f6b770f 100644
--- a/test/1919-vminit-thread-start-timing/src/art/Test1919.java
+++ b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
@@ -21,10 +21,12 @@
 
   public static void run() {
     for (Event e : getEvents()) {
-      if (PRINT_ALL_THREADS ||
-          e.thr.equals(Thread.currentThread()) ||
-          e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
-        System.out.println(e.name + ": " + e.thr.getName());
+      if (e.thr != null) {
+        if (PRINT_ALL_THREADS ||
+            e.thr.equals(Thread.currentThread()) ||
+            e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
+          System.out.println(e.name + ": " + e.thr.getName());
+        }
       }
     }
   }
diff --git a/test/1931-monitor-events/check b/test/1931-monitor-events/check
new file mode 100644
index 0000000..8a7f844
--- /dev/null
+++ b/test/1931-monitor-events/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Art sends events for park/unpark, and the RI doesn't. Remove it from the expected output.
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  patch -p0 expected.txt < jvm-expected.patch >/dev/null
+fi
+
+./default-check "$@"
diff --git a/test/1931-monitor-events/expected.txt b/test/1931-monitor-events/expected.txt
index 33a9bd3..f368ae2 100644
--- a/test/1931-monitor-events/expected.txt
+++ b/test/1931-monitor-events/expected.txt
@@ -1,6 +1,9 @@
 Testing contended locking.
 Locker thread 1 for NamedLock[Lock testLock] contended-LOCKING NamedLock[Lock testLock]
 Locker thread 1 for NamedLock[Lock testLock] LOCKED NamedLock[Lock testLock]
+Testing park.
+ParkThread start-monitor-wait NamedLock[Parking blocker object] timeout: 1
+ParkThread monitor-waited NamedLock[Parking blocker object] timed_out: true
 Testing monitor wait.
 Locker thread 2 for NamedLock[Lock testWait] start-monitor-wait NamedLock[Lock testWait] timeout: 0
 Locker thread 2 for NamedLock[Lock testWait] monitor-waited NamedLock[Lock testWait] timed_out: false
diff --git a/test/1931-monitor-events/jvm-expected.patch b/test/1931-monitor-events/jvm-expected.patch
new file mode 100644
index 0000000..7595b14
--- /dev/null
+++ b/test/1931-monitor-events/jvm-expected.patch
@@ -0,0 +1,3 @@
+5,6d4
+< ParkThread start-monitor-wait NamedLock[Parking blocker object] timeout: 1
+< ParkThread monitor-waited NamedLock[Parking blocker object] timed_out: true
diff --git a/test/1931-monitor-events/src/art/Test1931.java b/test/1931-monitor-events/src/art/Test1931.java
index ccefede..f549789 100644
--- a/test/1931-monitor-events/src/art/Test1931.java
+++ b/test/1931-monitor-events/src/art/Test1931.java
@@ -23,6 +23,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.*;
+import java.util.concurrent.locks.LockSupport;
 import java.util.ListIterator;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -67,6 +68,9 @@
     System.out.println("Testing contended locking.");
     testLock(new Monitors.NamedLock("Lock testLock"));
 
+    System.out.println("Testing park.");
+    testPark(new Monitors.NamedLock("Parking blocker object"));
+
     System.out.println("Testing monitor wait.");
     testWait(new Monitors.NamedLock("Lock testWait"));
 
@@ -88,6 +92,14 @@
     testInteruptWait(new Monitors.NamedLock("Lock testInteruptWait"));
   }
 
+  public static void testPark(Object blocker) throws Exception {
+    Thread holder = new Thread(() -> {
+      LockSupport.parkNanos(blocker, 10); // Should round up to one millisecond
+    }, "ParkThread");
+    holder.start();
+    holder.join();
+  }
+
   public static void testInteruptWait(final Monitors.NamedLock lk) throws Exception {
     final Monitors.LockController controller1 = new Monitors.LockController(lk);
     controller1.DoLock();
diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc
index 726a7a86..dfb08c1 100644
--- a/test/1934-jvmti-signal-thread/signal_threads.cc
+++ b/test/1934-jvmti-signal-thread/signal_threads.cc
@@ -47,19 +47,19 @@
                             jvmti_env,
                             jvmti_env->Allocate(sizeof(NativeMonitor),
                                                 reinterpret_cast<unsigned char**>(&mon)))) {
-    return -1l;
+    return -1L;
   }
   if (JvmtiErrorToException(env,
                             jvmti_env,
                             jvmti_env->CreateRawMonitor("test-1934 start",
                                                         &mon->start_monitor))) {
-    return -1l;
+    return -1L;
   }
   if (JvmtiErrorToException(env,
                             jvmti_env,
                             jvmti_env->CreateRawMonitor("test-1934 continue",
                                                         &mon->continue_monitor))) {
-    return -1l;
+    return -1L;
   }
   mon->should_continue = false;
   mon->should_start = false;
@@ -92,7 +92,7 @@
   while (!mon->should_continue) {
     if (JvmtiErrorToException(env,
                               jvmti_env,
-                              jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) {
+                              jvmti_env->RawMonitorWait(mon->continue_monitor, -1L))) {
       JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
       return;
     }
@@ -112,7 +112,7 @@
   while (!mon->should_start) {
     if (JvmtiErrorToException(env,
                               jvmti_env,
-                              jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) {
+                              jvmti_env->RawMonitorWait(mon->start_monitor, -1L))) {
       return;
     }
   }
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 97f0973..cc8a4c4 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -58,7 +58,8 @@
     }
     public void run() {
       int TARGET = 42;
-      if (hasJit() && expectOsr && !Main.isInterpreted()) {
+      boolean normalJit = hasJit() && getJitThreshold() != 0;  // Excluding JIT-at-first-use.
+      if (normalJit && expectOsr && !Main.isInterpreted()) {
           System.out.println("Unexpectedly in jit code prior to restarting the JIT!");
       }
       startJit();
@@ -72,10 +73,10 @@
       do {
         // Don't actually do anything here.
         inBusyLoop = true;
-      } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+      } while (normalJit && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
       // We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
       // Set local will also push us to interpreter but the get local may remain in compiled code.
-      if (hasJit()) {
+      if (normalJit) {
         boolean inOsr = Main.isInOsrCode("run");
         if (expectOsr && !inOsr) {
           throw new Error(
@@ -184,4 +185,5 @@
   public static native boolean stopJit();
   public static native boolean startJit();
   public static native boolean hasJit();
+  public static native int getJitThreshold();
 }
diff --git a/test/1940-ddms-ext/ddm_ext.cc b/test/1940-ddms-ext/ddm_ext.cc
index cc29df9..452187b 100644
--- a/test/1940-ddms-ext/ddm_ext.cc
+++ b/test/1940-ddms-ext/ddm_ext.cc
@@ -25,7 +25,7 @@
 namespace art {
 namespace Test1940DdmExt {
 
-typedef jvmtiError (*DdmHandleChunk)(jvmtiEnv* env,
+using DdmHandleChunk = jvmtiError(*)(jvmtiEnv* env,
                                      jint type_in,
                                      jint len_in,
                                      const jbyte* data_in,
diff --git a/test/1946-list-descriptors/descriptors.cc b/test/1946-list-descriptors/descriptors.cc
index 01b306d..07fee61 100644
--- a/test/1946-list-descriptors/descriptors.cc
+++ b/test/1946-list-descriptors/descriptors.cc
@@ -24,7 +24,7 @@
 namespace art {
 namespace Test1946Descriptors {
 
-typedef jvmtiError (*GetDescriptorList)(jvmtiEnv* env, jobject loader, jint* cnt, char*** descs);
+using GetDescriptorList = jvmtiError(*)(jvmtiEnv* env, jobject loader, jint* cnt, char*** descs);
 
 struct DescriptorData {
   GetDescriptorList get_descriptor_list;
diff --git a/test/1951-monitor-enter-no-suspend/raw_monitor.cc b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
index 0425e35..efd02b6 100644
--- a/test/1951-monitor-enter-no-suspend/raw_monitor.cc
+++ b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
@@ -26,7 +26,7 @@
 namespace art {
 namespace Test1951MonitorEnterNoSuspend {
 
-typedef jvmtiError (*RawMonitorEnterNoSuspend)(jvmtiEnv* env, jrawMonitorID mon);
+using RawMonitorEnterNoSuspend = jvmtiError(*)(jvmtiEnv* env, jrawMonitorID mon);
 
 template <typename T>
 static void Dealloc(T* t) {
diff --git a/test/1953-pop-frame/check b/test/1953-pop-frame/check
new file mode 100755
index 0000000..d552272
--- /dev/null
+++ b/test/1953-pop-frame/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+  (patch -p0 expected.txt < class-loading-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1953-pop-frame/class-loading-expected.patch b/test/1953-pop-frame/class-loading-expected.patch
new file mode 100644
index 0000000..2edef15
--- /dev/null
+++ b/test/1953-pop-frame/class-loading-expected.patch
@@ -0,0 +1,21 @@
+74a75,94
+> Test stopped during a ClassLoad event.
+> Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+> Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> 	art.Test1953.popFrame(Native Method)
+> 	art.Test1953.runTestOn(Test1953.java)
+> 	art.Test1953.runTestOn(Test1953.java)
+> 	art.Test1953.runTests(Test1953.java)
+> 	<Additional frames hidden>
+> TC0.foo == 1
+> result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+> Test stopped during a ClassPrepare event.
+> Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+> Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> 	art.Test1953.popFrame(Native Method)
+> 	art.Test1953.runTestOn(Test1953.java)
+> 	art.Test1953.runTestOn(Test1953.java)
+> 	art.Test1953.runTests(Test1953.java)
+> 	<Additional frames hidden>
+> TC1.foo == 2
+> result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1953-pop-frame/expected.txt b/test/1953-pop-frame/expected.txt
new file mode 100644
index 0000000..906703d
--- /dev/null
+++ b/test/1953-pop-frame/expected.txt
@@ -0,0 +1,98 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1953-pop-frame/info.txt b/test/1953-pop-frame/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1953-pop-frame/info.txt
@@ -0,0 +1,7 @@
+Test basic JVMTI PopFrame functionality.
+
+This test suspends a thread at a variety of interesting points (breakpoints,
+single-step, field access/modification, method entry/exit, exception and
+exception-catch events, frame-pop notifications, and class load/prepare
+events), then uses the JVMTI PopFrame API to pop the topmost frame and
+verifies the popped method is re-executed, or fails for opaque (native) frames.
diff --git a/test/1953-pop-frame/pop_frame.cc b/test/1953-pop-frame/pop_frame.cc
new file mode 100644
index 0000000..1c2d2a1
--- /dev/null
+++ b/test/1953-pop-frame/pop_frame.cc
@@ -0,0 +1,998 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1953PopFrame {
+
+struct TestData {
+  jlocation target_loc;
+  jmethodID target_method;
+  jclass target_klass;
+  jfieldID target_field;
+  jrawMonitorID notify_monitor;
+  jint frame_pop_offset;
+  jmethodID frame_pop_setup_method;
+  std::vector<std::string> interesting_classes;
+  bool hit_location;
+
+  TestData(jvmtiEnv* jvmti,
+           JNIEnv* env,
+           jlocation loc,
+           jobject meth,
+           jclass klass,
+           jobject field,
+           jobject setup_meth,
+           jint pop_offset,
+           const std::vector<std::string>&& interesting)
+      : target_loc(loc),
+        target_method(meth != nullptr ? env->FromReflectedMethod(meth) : nullptr),
+        target_klass(reinterpret_cast<jclass>(env->NewGlobalRef(klass))),
+        target_field(field != nullptr ? env->FromReflectedField(field) : nullptr),
+        frame_pop_offset(pop_offset),
+        frame_pop_setup_method(setup_meth != nullptr ? env->FromReflectedMethod(setup_meth)
+                                                     : nullptr),
+        interesting_classes(interesting),
+        hit_location(false) {
+    JvmtiErrorToException(env, jvmti, jvmti->CreateRawMonitor("SuspendStopMonitor",
+                                                              &notify_monitor));
+  }
+
+  void PerformSuspend(jvmtiEnv* jvmti, JNIEnv* env) {
+    // Wake up the waiting thread.
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorEnter(notify_monitor));
+    hit_location = true;
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorNotifyAll(notify_monitor));
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorExit(notify_monitor));
+    // Suspend ourself
+    jvmti->SuspendThread(nullptr);
+  }
+};
+
+void JNICALL cbSingleStep(jvmtiEnv* jvmti,
+                          JNIEnv* env,
+                          jthread thr,
+                          jmethodID meth,
+                          jlocation loc) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (meth != data->target_method || loc != data->target_loc) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbExceptionCatch(jvmtiEnv *jvmti,
+                              JNIEnv* env,
+                              jthread thr,
+                              jmethodID method,
+                              jlocation location ATTRIBUTE_UNUSED,
+                              jobject exception ATTRIBUTE_UNUSED) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbException(jvmtiEnv *jvmti,
+                         JNIEnv* env,
+                         jthread thr,
+                         jmethodID method,
+                         jlocation location ATTRIBUTE_UNUSED,
+                         jobject exception ATTRIBUTE_UNUSED,
+                         jmethodID catch_method ATTRIBUTE_UNUSED,
+                         jlocation catch_location ATTRIBUTE_UNUSED) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodEntry(jvmtiEnv *jvmti,
+                           JNIEnv* env,
+                           jthread thr,
+                           jmethodID method) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodExit(jvmtiEnv *jvmti,
+                          JNIEnv* env,
+                          jthread thr,
+                          jmethodID method,
+                          jboolean was_popped_by_exception ATTRIBUTE_UNUSED,
+                          jvalue return_value ATTRIBUTE_UNUSED) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldModification(jvmtiEnv* jvmti,
+                                 JNIEnv* env,
+                                 jthread thr,
+                                 jmethodID method ATTRIBUTE_UNUSED,
+                                 jlocation location ATTRIBUTE_UNUSED,
+                                 jclass field_klass ATTRIBUTE_UNUSED,
+                                 jobject object ATTRIBUTE_UNUSED,
+                                 jfieldID field,
+                                 char signature_type ATTRIBUTE_UNUSED,
+                                 jvalue new_value ATTRIBUTE_UNUSED) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (field != data->target_field) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldAccess(jvmtiEnv* jvmti,
+                           JNIEnv* env,
+                           jthread thr,
+                           jmethodID method ATTRIBUTE_UNUSED,
+                           jlocation location ATTRIBUTE_UNUSED,
+                           jclass field_klass,
+                           jobject object ATTRIBUTE_UNUSED,
+                           jfieldID field) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (field != data->target_field || !env->IsSameObject(field_klass, data->target_klass)) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbBreakpointHit(jvmtiEnv* jvmti,
+                             JNIEnv* env,
+                             jthread thr,
+                             jmethodID method,
+                             jlocation loc) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (data->frame_pop_setup_method == method) {
+    CHECK(loc == 0) << "We should have stopped at location 0";
+    if (JvmtiErrorToException(env,
+                              jvmti,
+                              jvmti->NotifyFramePop(thr, data->frame_pop_offset))) {
+      return;
+    }
+    return;
+  }
+  if (method != data->target_method || loc != data->target_loc) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFramePop(jvmtiEnv* jvmti,
+                        JNIEnv* env,
+                        jthread thr,
+                        jmethodID method ATTRIBUTE_UNUSED,
+                        jboolean was_popped_by_exception ATTRIBUTE_UNUSED) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbClassLoadOrPrepare(jvmtiEnv* jvmti,
+                                  JNIEnv* env,
+                                  jthread thr,
+                                  jclass klass) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  char* name;
+  if (JvmtiErrorToException(env, jvmti, jvmti->GetClassSignature(klass, &name, nullptr))) {
+    return;
+  }
+  std::string name_str(name);
+  if (JvmtiErrorToException(env,
+                            jvmti,
+                            jvmti->Deallocate(reinterpret_cast<unsigned char*>(name)))) {
+    return;
+  }
+  if (std::find(data->interesting_classes.cbegin(),
+                data->interesting_classes.cend(),
+                name_str) != data->interesting_classes.cend()) {
+    data->PerformSuspend(jvmti, env);
+  }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupTest(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  jvmtiCapabilities caps;
+  memset(&caps, 0, sizeof(caps));
+  // Most of these will already be there but might as well be complete.
+  caps.can_pop_frame                          = 1;
+  caps.can_generate_single_step_events        = 1;
+  caps.can_generate_breakpoint_events         = 1;
+  caps.can_suspend                            = 1;
+  caps.can_generate_method_entry_events       = 1;
+  caps.can_generate_method_exit_events        = 1;
+  caps.can_generate_monitor_events            = 1;
+  caps.can_generate_exception_events          = 1;
+  caps.can_generate_frame_pop_events          = 1;
+  caps.can_generate_field_access_events       = 1;
+  caps.can_generate_field_modification_events = 1;
+  caps.can_redefine_classes                   = 1;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
+    return;
+  }
+  jvmtiEventCallbacks cb;
+  memset(&cb, 0, sizeof(cb));
+  // TODO Add the rest of these.
+  cb.Breakpoint        = cbBreakpointHit;
+  cb.SingleStep        = cbSingleStep;
+  cb.FieldAccess       = cbFieldAccess;
+  cb.FieldModification = cbFieldModification;
+  cb.MethodEntry       = cbMethodEntry;
+  cb.MethodExit        = cbMethodExit;
+  cb.Exception         = cbException;
+  cb.ExceptionCatch    = cbExceptionCatch;
+  cb.FramePop          = cbFramePop;
+  cb.ClassLoad         = cbClassLoadOrPrepare;
+  cb.ClassPrepare      = cbClassLoadOrPrepare;
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)));
+}
+
+static bool DeleteTestData(JNIEnv* env, jthread thr, TestData* data) {
+  env->DeleteGlobalRef(data->target_klass);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return false;
+  }
+  return JvmtiErrorToException(env,
+                               jvmti_env,
+                               jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data)));
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+                               jobject meth,
+                               jlocation loc,
+                               jclass target_klass,
+                               jobject field,
+                               jobject setup_meth,
+                               jint pop_offset,
+                               const std::vector<std::string>&& interesting_names) {
+  void* data_ptr;
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->Allocate(sizeof(TestData),
+                                                reinterpret_cast<uint8_t**>(&data_ptr)))) {
+    return nullptr;
+  }
+  data = new (data_ptr) TestData(jvmti_env,
+                                 env,
+                                 loc,
+                                 meth,
+                                 target_klass,
+                                 field,
+                                 setup_meth,
+                                 pop_offset,
+                                 std::move(interesting_names));
+  if (env->ExceptionCheck()) {
+    env->DeleteGlobalRef(data->target_klass);
+    jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data));
+    return nullptr;
+  }
+  return data;
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+                               jobject meth,
+                               jlocation loc,
+                               jclass target_klass,
+                               jobject field,
+                               jobject setup_meth,
+                               jint pop_offset) {
+  std::vector<std::string> empty;
+  return SetupTestData(
+      env, meth, loc, target_klass, field, setup_meth, pop_offset, std::move(empty));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendClassEvent(JNIEnv* env,
+                                                      jclass klass ATTRIBUTE_UNUSED,
+                                                      jint event_num,
+                                                      jobjectArray interesting_names,
+                                                      jthread thr) {
+  CHECK(event_num == JVMTI_EVENT_CLASS_LOAD || event_num == JVMTI_EVENT_CLASS_PREPARE);
+  std::vector<std::string> names;
+  jint cnt = env->GetArrayLength(interesting_names);
+  for (jint i = 0; i < cnt; i++) {
+    env->PushLocalFrame(1);
+    jstring name_obj = reinterpret_cast<jstring>(env->GetObjectArrayElement(interesting_names, i));
+    const char* name_chr = env->GetStringUTFChars(name_obj, nullptr);
+    names.push_back(std::string(name_chr));
+    env->ReleaseStringUTFChars(name_obj, name_chr);
+    env->PopLocalFrame(nullptr);
+  }
+  TestData* data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0, std::move(names));
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(env,
+                        jvmti_env,
+                        jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                            static_cast<jvmtiEvent>(event_num),
+                                                            thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendClassEvent(JNIEnv* env,
+                                                      jclass klass ATTRIBUTE_UNUSED,
+                                                      jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_CLASS_LOAD,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_CLASS_PREPARE,
+                                                                thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendSingleStepAt(JNIEnv* env,
+                                                        jclass klass ATTRIBUTE_UNUSED,
+                                                        jobject meth,
+                                                        jlocation loc,
+                                                        jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                            JVMTI_EVENT_SINGLE_STEP,
+                                                                            thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendSingleStepFor(JNIEnv* env,
+                                                         jclass klass ATTRIBUTE_UNUSED,
+                                                         jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_SINGLE_STEP,
+                                                                thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendPopFrameEvent(JNIEnv* env,
+                                                         jclass klass ATTRIBUTE_UNUSED,
+                                                         jint offset,
+                                                         jobject breakpoint_func,
+                                                         jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, breakpoint_func, offset);
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                JVMTI_EVENT_FRAME_POP,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                JVMTI_EVENT_BREAKPOINT,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetBreakpoint(data->frame_pop_setup_method, 0))) {
+    return;
+  }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendPopFrameEvent(JNIEnv* env,
+                                                         jclass klass ATTRIBUTE_UNUSED,
+                                                         jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_FRAME_POP,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_BREAKPOINT,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->ClearBreakpoint(data->frame_pop_setup_method, 0))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendBreakpointFor(JNIEnv* env,
+                                                         jclass klass ATTRIBUTE_UNUSED,
+                                                         jobject meth,
+                                                         jlocation loc,
+                                                         jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                JVMTI_EVENT_BREAKPOINT,
+                                                                thr))) {
+    return;
+  }
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetBreakpoint(data->target_method,
+                                                                 data->target_loc));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendBreakpointFor(JNIEnv* env,
+                                                         jclass klass ATTRIBUTE_UNUSED,
+                                                         jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_BREAKPOINT,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->ClearBreakpoint(data->target_method,
+                                                       data->target_loc))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendExceptionEvent(JNIEnv* env,
+                                                          jclass klass ATTRIBUTE_UNUSED,
+                                                          jobject method,
+                                                          jboolean is_catch,
+                                                          jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(
+                                thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(env,
+                        jvmti_env,
+                        jvmti_env->SetEventNotificationMode(
+                            JVMTI_ENABLE,
+                            is_catch ? JVMTI_EVENT_EXCEPTION_CATCH : JVMTI_EVENT_EXCEPTION,
+                            thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendExceptionEvent(JNIEnv* env,
+                                                          jclass klass ATTRIBUTE_UNUSED,
+                                                          jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_EXCEPTION_CATCH,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_EXCEPTION,
+                                                                thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendMethodEvent(JNIEnv* env,
+                                                       jclass klass ATTRIBUTE_UNUSED,
+                                                       jobject method,
+                                                       jboolean enter,
+                                                       jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(
+                                thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(env,
+                        jvmti_env,
+                        jvmti_env->SetEventNotificationMode(
+                            JVMTI_ENABLE,
+                            enter ? JVMTI_EVENT_METHOD_ENTRY : JVMTI_EVENT_METHOD_EXIT,
+                            thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendMethodEvent(JNIEnv* env,
+                                                       jclass klass ATTRIBUTE_UNUSED,
+                                                       jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_METHOD_EXIT,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_METHOD_ENTRY,
+                                                                thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupFieldSuspendFor(JNIEnv* env,
+                                                    jclass klass ATTRIBUTE_UNUSED,
+                                                    jclass target_klass,
+                                                    jobject field,
+                                                    jboolean access,
+                                                    jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(
+                                thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, target_klass, field, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(
+                                JVMTI_ENABLE,
+                                access ? JVMTI_EVENT_FIELD_ACCESS : JVMTI_EVENT_FIELD_MODIFICATION,
+                                thr))) {
+    return;
+  }
+  if (access) {
+    JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldAccessWatch(data->target_klass,
+                                                                         data->target_field));
+  } else {
+    JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldModificationWatch(data->target_klass,
+                                                                               data->target_field));
+  }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearFieldSuspendFor(JNIEnv* env,
+                                                    jclass klass ATTRIBUTE_UNUSED,
+                                                    jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_FIELD_ACCESS,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_FIELD_MODIFICATION,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->ClearFieldModificationWatch(
+                                data->target_klass, data->target_field)) &&
+      JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->ClearFieldAccessWatch(
+                                data->target_klass, data->target_field))) {
+    return;
+  } else {
+    env->ExceptionClear();
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupWaitForNativeCall(JNIEnv* env,
+                                                      jclass klass ATTRIBUTE_UNUSED,
+                                                      jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(
+                                thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearWaitForNativeCall(JNIEnv* env,
+                                                      jclass klass ATTRIBUTE_UNUSED,
+                                                      jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_waitForSuspendHit(JNIEnv* env,
+                                                 jclass klass ATTRIBUTE_UNUSED,
+                                                 jthread thr) {
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(thr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(data->notify_monitor))) {
+    return;
+  }
+  while (!data->hit_location) {
+    if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorWait(data->notify_monitor, -1))) {
+      return;
+    }
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(data->notify_monitor))) {
+    return;
+  }
+  jint state = 0;
+  while (!JvmtiErrorToException(env, jvmti_env, jvmti_env->GetThreadState(thr, &state)) &&
+         (state & JVMTI_THREAD_STATE_SUSPENDED) == 0) { }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_popFrame(JNIEnv* env,
+                                        jclass klass ATTRIBUTE_UNUSED,
+                                        jthread thr) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->PopFrame(thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_00024NativeCalledObject_calledFunction(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(1);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  env->PopLocalFrame(nullptr);
+  TestData *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  data->PerformSuspend(jvmti_env, env);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_00024NativeCallerObject_run(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(1);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID baseCnt = env->GetFieldID(klass, "baseCnt", "I");
+  env->SetIntField(thiz, baseCnt, env->GetIntField(thiz, baseCnt) + 1);
+  jmethodID called = env->GetMethodID(klass, "calledFunction", "()V");
+  env->CallVoidMethod(thiz, called);
+  env->PopLocalFrame(nullptr);
+}
+
+extern "C" JNIEXPORT
+jboolean JNICALL Java_art_Test1953_isClassLoaded(JNIEnv* env, jclass, jstring name) {
+  ScopedUtfChars chr(env, name);
+  if (env->ExceptionCheck()) {
+    return false;
+  }
+  jint cnt = 0;
+  jclass* klasses = nullptr;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetLoadedClasses(&cnt, &klasses))) {
+    return false;
+  }
+  bool res = false;
+  for (jint i = 0; !res && i < cnt; i++) {
+    char* sig;
+    if (JvmtiErrorToException(env,
+                              jvmti_env,
+                              jvmti_env->GetClassSignature(klasses[i], &sig, nullptr))) {
+      return false;
+    }
+    res = (strcmp(sig, chr.c_str()) == 0);
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+  }
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+  return res;
+}
+
+}  // namespace Test1953PopFrame
+}  // namespace art
+
diff --git a/test/1953-pop-frame/run b/test/1953-pop-frame/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1953-pop-frame/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1953-pop-frame/src/Main.java b/test/1953-pop-frame/src/Main.java
new file mode 100644
index 0000000..156076e
--- /dev/null
+++ b/test/1953-pop-frame/src/Main.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1953.run(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS"));
+  }
+}
diff --git a/test/1953-pop-frame/src/art/Breakpoint.java b/test/1953-pop-frame/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
diff --git a/test/1953-pop-frame/src/art/Redefinition.java b/test/1953-pop-frame/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1953-pop-frame/src/art/StackTrace.java b/test/1953-pop-frame/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1953-pop-frame/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+  public static class StackFrameData {
+    public final Thread thr;
+    public final Executable method;
+    public final long current_location;
+    public final int depth;
+
+    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+      this.thr = thr;
+      this.method = e;
+      this.current_location = loc;
+      this.depth = depth;
+    }
+    @Override
+    public String toString() {
+      return String.format(
+          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+          this.thr,
+          this.method,
+          this.current_location,
+          this.depth);
+    }
+  }
+
+  public static native int GetStackDepth(Thread thr);
+
+  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+  public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says that not being suspended is fine, but since we want the results to
+    // be consistent, we suspend the thread when running on the RI.
+    boolean suspend_thread =
+        !System.getProperty("java.vm.name").equals("Dalvik") &&
+        !thr.equals(Thread.currentThread()) &&
+        !Suspension.isSuspended(thr);
+    if (suspend_thread) {
+      Suspension.suspend(thr);
+    }
+    StackFrameData[] out = nativeGetStackTrace(thr);
+    if (suspend_thread) {
+      Suspension.resume(thr);
+    }
+    return out;
+  }
+}
+
diff --git a/test/1953-pop-frame/src/art/Suspension.java b/test/1953-pop-frame/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+  // Suspends a thread using jvmti.
+  public native static void suspend(Thread thr);
+
+  // Resumes a thread using jvmti.
+  public native static void resume(Thread thr);
+
+  public native static boolean isSuspended(Thread thr);
+
+  public native static int[] suspendList(Thread... threads);
+  public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1953-pop-frame/src/art/Test1953.java b/test/1953-pop-frame/src/art/Test1953.java
new file mode 100644
index 0000000..adec776
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Test1953.java
@@ -0,0 +1,976 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Base64;
+import java.util.EnumSet;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+
+public class Test1953 {
+  public final boolean canRunClassLoadTests;
+  public static void doNothing() {}
+
+  public interface TestRunnable extends Runnable {
+    public int getBaseCallCount();
+    public Method getCalledMethod() throws Exception;
+    public default Method getCallingMethod() throws Exception {
+      return this.getClass().getMethod("run");
+    };
+  }
+
+  public static interface TestSuspender {
+    public void setup(Thread thr);
+    public void waitForSuspend(Thread thr);
+    public void cleanup(Thread thr);
+  }
+
+  public static interface ThreadRunnable { public void run(Thread thr); }
+  public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
+    return new TestSuspender() {
+      public void setup(Thread thr) { setup.run(thr); }
+      public void waitForSuspend(Thread thr) { Test1953.waitForSuspendHit(thr); }
+      public void cleanup(Thread thr) { clean.run(thr); }
+    };
+  }
+
+  public void runTestOn(TestRunnable testObj, ThreadRunnable su, ThreadRunnable cl) throws
+      Exception {
+    runTestOn(testObj, makeSuspend(su, cl));
+  }
+
+  private static void SafePrintStackTrace(StackTraceElement st[]) {
+    for (StackTraceElement e : st) {
+      System.out.println("\t" + e.getClassName() + "." + e.getMethodName() + "(" +
+          (e.isNativeMethod() ? "Native Method" : e.getFileName()) + ")");
+      if (e.getClassName().equals("art.Test1953") && e.getMethodName().equals("runTests")) {
+        System.out.println("\t<Additional frames hidden>");
+        break;
+      }
+    }
+  }
+
+  public void runTestOn(TestRunnable testObj, TestSuspender su) throws Exception {
+    System.out.println("Single call with PopFrame on " + testObj + " base-call-count: " +
+        testObj.getBaseCallCount());
+    final CountDownLatch continue_latch = new CountDownLatch(1);
+    final CountDownLatch startup_latch = new CountDownLatch(1);
+    Runnable await = () -> {
+      try {
+        startup_latch.countDown();
+        continue_latch.await();
+      } catch (Exception e) {
+        throw new Error("Failed to await latch", e);
+      }
+    };
+    Thread thr = new Thread(() -> { await.run(); testObj.run(); });
+    thr.start();
+
+    // Wait until the other thread is started.
+    startup_latch.await();
+
+    // Do any final setup.
+    preTest.accept(testObj);
+
+    // Setup suspension method on the thread.
+    su.setup(thr);
+
+    // Let the other thread go.
+    continue_latch.countDown();
+
+    // Wait for the other thread to hit the breakpoint/watchpoint/whatever and suspend itself
+    // (without re-entering java)
+    su.waitForSuspend(thr);
+
+    // Cleanup the breakpoint/watchpoint/etc.
+    su.cleanup(thr);
+
+    try {
+      // Pop the frame.
+      popFrame(thr);
+    } catch (Exception e) {
+      System.out.println("Failed to pop frame due to " + e);
+      SafePrintStackTrace(e.getStackTrace());
+    }
+
+    // Start the other thread going again.
+    Suspension.resume(thr);
+
+    // Wait for the other thread to finish.
+    thr.join();
+
+    // See how many times calledFunction was called.
+    System.out.println("result is " + testObj + " base-call count: " + testObj.getBaseCallCount());
+  }
+
+  public static abstract class AbstractTestObject implements TestRunnable {
+    public int callerCnt;
+
+    public AbstractTestObject() {
+      callerCnt = 0;
+    }
+
+    public int getBaseCallCount() {
+      return callerCnt;
+    }
+
+    public void run() {
+      callerCnt++;
+      // This function should be re-executed by the popFrame.
+      calledFunction();
+    }
+
+    public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("calledFunction");
+    }
+
+    public abstract void calledFunction();
+  }
+
+  public static class RedefineTestObject extends AbstractTestObject implements Runnable {
+    public static enum RedefineState { ORIGINAL, REDEFINED, };
+    /* public static class RedefineTestObject extends AbstractTestObject implements Runnable {
+     *   public static final byte[] CLASS_BYTES;
+     *   public static final byte[] DEX_BYTES;
+     *   static {
+     *     CLASS_BYTES = null;
+     *     DEX_BYTES = null;
+     *   }
+     *
+     *   public EnumSet<RedefineState> redefine_states;
+     *   public RedefineTestObject() {
+     *     super();
+     *     redefine_states = EnumSet.noneOf(RedefineState.class);
+     *   }
+     *   public String toString() {
+     *     return "RedefineTestObject { states: " + redefine_states.toString()
+     *                                            + " current: REDEFINED }";
+     *   }
+     *   public void calledFunction() {
+     *     redefine_states.add(RedefineState.REDEFINED);  // line +0
+     *     // We will trigger the redefinition using a breakpoint on the next line.
+     *     doNothing();                                   // line +2
+     *   }
+     * }
+     */
+    public static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUATQoADQAjBwAkCgAlACYJAAwAJwoAJQAoEgAAACwJAAIALQoAJQAuCgAvADAJAAwA" +
+      "MQkADAAyBwAzBwA0BwA2AQASUmVkZWZpbmVUZXN0T2JqZWN0AQAMSW5uZXJDbGFzc2VzAQANUmVk" +
+      "ZWZpbmVTdGF0ZQEAC0NMQVNTX0JZVEVTAQACW0IBAAlERVhfQllURVMBAA9yZWRlZmluZV9zdGF0" +
+      "ZXMBABNMamF2YS91dGlsL0VudW1TZXQ7AQAJU2lnbmF0dXJlAQBETGphdmEvdXRpbC9FbnVtU2V0" +
+      "PExhcnQvVGVzdDE5NTMkUmVkZWZpbmVUZXN0T2JqZWN0JFJlZGVmaW5lU3RhdGU7PjsBAAY8aW5p" +
+      "dD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAIdG9TdHJpbmcBABQoKUxqYXZhL2xh" +
+      "bmcvU3RyaW5nOwEADmNhbGxlZEZ1bmN0aW9uAQAIPGNsaW5pdD4BAApTb3VyY2VGaWxlAQANVGVz" +
+      "dDE5NTMuamF2YQwAGQAaAQAtYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmlu" +
+      "ZVN0YXRlBwA3DAA4ADkMABUAFgwAHQAeAQAQQm9vdHN0cmFwTWV0aG9kcw8GADoIADsMADwAPQwA" +
+      "PgA/DABAAEEHAEIMAEMAGgwAEgATDAAUABMBAB9hcnQvVGVzdDE5NTMkUmVkZWZpbmVUZXN0T2Jq" +
+      "ZWN0AQAfYXJ0L1Rlc3QxOTUzJEFic3RyYWN0VGVzdE9iamVjdAEAEkFic3RyYWN0VGVzdE9iamVj" +
+      "dAEAEmphdmEvbGFuZy9SdW5uYWJsZQEAEWphdmEvdXRpbC9FbnVtU2V0AQAGbm9uZU9mAQAmKExq" +
+      "YXZhL2xhbmcvQ2xhc3M7KUxqYXZhL3V0aWwvRW51bVNldDsKAEQARQEAM1JlZGVmaW5lVGVzdE9i" +
+      "amVjdCB7IHN0YXRlczogASBjdXJyZW50OiBSRURFRklORUQgfQEAF21ha2VDb25jYXRXaXRoQ29u" +
+      "c3RhbnRzAQAmKExqYXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZzsBAAlSRURFRklO" +
+      "RUQBAC9MYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmluZVN0YXRlOwEAA2Fk" +
+      "ZAEAFShMamF2YS9sYW5nL09iamVjdDspWgEADGFydC9UZXN0MTk1MwEACWRvTm90aGluZwcARgwA" +
+      "PABJAQAkamF2YS9sYW5nL2ludm9rZS9TdHJpbmdDb25jYXRGYWN0b3J5BwBLAQAGTG9va3VwAQCY" +
+      "KExqYXZhL2xhbmcvaW52b2tlL01ldGhvZEhhbmRsZXMkTG9va3VwO0xqYXZhL2xhbmcvU3RyaW5n" +
+      "O0xqYXZhL2xhbmcvaW52b2tlL01ldGhvZFR5cGU7TGphdmEvbGFuZy9TdHJpbmc7W0xqYXZhL2xh" +
+      "bmcvT2JqZWN0OylMamF2YS9sYW5nL2ludm9rZS9DYWxsU2l0ZTsHAEwBACVqYXZhL2xhbmcvaW52" +
+      "b2tlL01ldGhvZEhhbmRsZXMkTG9va3VwAQAeamF2YS9sYW5nL2ludm9rZS9NZXRob2RIYW5kbGVz" +
+      "ACEADAANAAEADgADABkAEgATAAAAGQAUABMAAAABABUAFgABABcAAAACABgABAABABkAGgABABsA" +
+      "AAAuAAIAAQAAAA4qtwABKhICuAADtQAEsQAAAAEAHAAAAA4AAwAAACEABAAiAA0AIwABAB0AHgAB" +
+      "ABsAAAAlAAEAAQAAAA0qtAAEtgAFugAGAACwAAAAAQAcAAAABgABAAAAJQABAB8AGgABABsAAAAv" +
+      "AAIAAQAAAA8qtAAEsgAHtgAIV7gACbEAAAABABwAAAAOAAMAAAApAAsAKwAOACwACAAgABoAAQAb" +
+      "AAAAKQABAAAAAAAJAbMACgGzAAuxAAAAAQAcAAAADgADAAAAGwAEABwACAAdAAMAIQAAAAIAIgAQ" +
+      "AAAAIgAEAAwALwAPAAkAAgAMABFAGQANAC8ANQQJAEcASgBIABkAKQAAAAgAAQAqAAEAKw==");
+    public static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQAaR23N6WpunLRVX+BexSuzzNNiHNOvQpFoBwAAcAAAAHhWNBIAAAAAAAAAAKQGAAAq" +
+      "AAAAcAAAABEAAAAYAQAABQAAAFwBAAAEAAAAmAEAAAwAAAC4AQAAAQAAABgCAAAwBQAAOAIAACID" +
+      "AAA5AwAAQwMAAEsDAABPAwAAXAMAAGcDAABqAwAAbgMAAJEDAADCAwAA5QMAAPUDAAAZBAAAOQQA" +
+      "AFwEAAB7BAAAjgQAAKIEAAC4BAAAzAQAAOcEAAD8BAAAEQUAABwFAAAwBQAATwUAAF4FAABhBQAA" +
+      "ZAUAAGgFAABsBQAAeQUAAH4FAACGBQAAlgUAAKEFAACnBQAArwUAAMAFAADKBQAA0QUAAAgAAAAJ" +
+      "AAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAAGwAAABwA" +
+      "AAAeAAAABgAAAAsAAAAAAAAABwAAAAwAAAAMAwAABwAAAA0AAAAUAwAAGwAAAA4AAAAAAAAAHQAA" +
+      "AA8AAAAcAwAAAQABABcAAAACABAABAAAAAIAEAAFAAAAAgANACYAAAAAAAMAAgAAAAIAAwABAAAA" +
+      "AgADAAIAAAACAAMAIgAAAAIAAAAnAAAAAwADACMAAAAMAAMAAgAAAAwAAQAhAAAADAAAACcAAAAN" +
+      "AAQAIAAAAA0AAgAlAAAADQAAACcAAAACAAAAAQAAAAAAAAAEAwAAGgAAAIwGAABRBgAAAAAAAAQA" +
+      "AQACAAAA+gIAAB0AAABUMAMAbhALAAAADAAiAQwAcBAGAAEAGgIZAG4gBwAhAG4gBwABABoAAABu" +
+      "IAcAAQBuEAgAAQAMABEAAAABAAAAAAAAAPQCAAAGAAAAEgBpAAEAaQACAA4AAgABAAEAAADuAgAA" +
+      "DAAAAHAQAAABABwAAQBxEAoAAAAMAFsQAwAOAAMAAQACAAAA/gIAAAsAAABUIAMAYgEAAG4gCQAQ" +
+      "AHEABQAAAA4AIQAOPIcAGwAOPC0AJQAOACkADnk8AAEAAAAKAAAAAQAAAAsAAAABAAAACAAAAAEA" +
+      "AAAJABUgY3VycmVudDogUkVERUZJTkVEIH0ACDxjbGluaXQ+AAY8aW5pdD4AAj47AAtDTEFTU19C" +
+      "WVRFUwAJREVYX0JZVEVTAAFMAAJMTAAhTGFydC9UZXN0MTk1MyRBYnN0cmFjdFRlc3RPYmplY3Q7" +
+      "AC9MYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmluZVN0YXRlOwAhTGFydC9U" +
+      "ZXN0MTk1MyRSZWRlZmluZVRlc3RPYmplY3Q7AA5MYXJ0L1Rlc3QxOTUzOwAiTGRhbHZpay9hbm5v" +
+      "dGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ACFM" +
+      "ZGFsdmlrL2Fubm90YXRpb24vTWVtYmVyQ2xhc3NlczsAHUxkYWx2aWsvYW5ub3RhdGlvbi9TaWdu" +
+      "YXR1cmU7ABFMamF2YS9sYW5nL0NsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5n" +
+      "L1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7" +
+      "ABNMamF2YS91dGlsL0VudW1TZXQ7ABNMamF2YS91dGlsL0VudW1TZXQ8AAlSRURFRklORUQAElJl" +
+      "ZGVmaW5lVGVzdE9iamVjdAAdUmVkZWZpbmVUZXN0T2JqZWN0IHsgc3RhdGVzOiAADVRlc3QxOTUz" +
+      "LmphdmEAAVYAAVoAAlpMAAJbQgALYWNjZXNzRmxhZ3MAA2FkZAAGYXBwZW5kAA5jYWxsZWRGdW5j" +
+      "dGlvbgAJZG9Ob3RoaW5nAARuYW1lAAZub25lT2YAD3JlZGVmaW5lX3N0YXRlcwAIdG9TdHJpbmcA" +
+      "BXZhbHVlAFt+fkQ4eyJtaW4tYXBpIjoxLCJzaGEtMSI6IjUyNzNjM2RmZWUxMDQ2NzIwYWY0MjVm" +
+      "YTg1NTMxNmM5OWM4NmM4ZDIiLCJ2ZXJzaW9uIjoiMS4zLjE4LWRldiJ9AAIHASgcAxcWFwkXAwIE" +
+      "ASgYAwIFAh8ECSQXGAIGASgcARgBAgECAgEZARkDAQGIgASEBQGBgASgBQMByAUBAbgEAAAAAAAB" +
+      "AAAALgYAAAMAAAA6BgAAQAYAAEkGAAB8BgAAAQAAAAAAAAAAAAAAAwAAAHQGAAAQAAAAAAAAAAEA" +
+      "AAAAAAAAAQAAACoAAABwAAAAAgAAABEAAAAYAQAAAwAAAAUAAABcAQAABAAAAAQAAACYAQAABQAA" +
+      "AAwAAAC4AQAABgAAAAEAAAAYAgAAASAAAAQAAAA4AgAAAyAAAAQAAADuAgAAARAAAAQAAAAEAwAA" +
+      "AiAAACoAAAAiAwAABCAAAAQAAAAuBgAAACAAAAEAAABRBgAAAxAAAAMAAABwBgAABiAAAAEAAACM" +
+      "BgAAABAAAAEAAACkBgAA");
+
+    public EnumSet<RedefineState> redefine_states;
+    public RedefineTestObject() {
+      super();
+      redefine_states = EnumSet.noneOf(RedefineState.class);
+    }
+
+    public String toString() {
+      return "RedefineTestObject { states: " + redefine_states.toString() + " current: ORIGINAL }";
+    }
+
+    public void calledFunction() {
+      redefine_states.add(RedefineState.ORIGINAL);  // line +0
+      // We will trigger the redefinition using a breakpoint on the next line.
+      doNothing();                                  // line +2
+    }
+  }
+
+  public static class ClassLoadObject implements TestRunnable {
+    public int cnt;
+    public int baseCallCnt;
+
+    public static final String[] CLASS_NAMES = new String[] {
+      "Lart/Test1953$ClassLoadObject$TC0;",
+      "Lart/Test1953$ClassLoadObject$TC1;",
+      "Lart/Test1953$ClassLoadObject$TC2;",
+      "Lart/Test1953$ClassLoadObject$TC3;",
+      "Lart/Test1953$ClassLoadObject$TC4;",
+      "Lart/Test1953$ClassLoadObject$TC5;",
+      "Lart/Test1953$ClassLoadObject$TC6;",
+      "Lart/Test1953$ClassLoadObject$TC7;",
+      "Lart/Test1953$ClassLoadObject$TC8;",
+      "Lart/Test1953$ClassLoadObject$TC9;",
+    };
+
+    private static int curClass = 0;
+
+    private static class TC0 { public static int foo; static { foo = 1; } }
+    private static class TC1 { public static int foo; static { foo = 2; } }
+    private static class TC2 { public static int foo; static { foo = 3; } }
+    private static class TC3 { public static int foo; static { foo = 4; } }
+    private static class TC4 { public static int foo; static { foo = 5; } }
+    private static class TC5 { public static int foo; static { foo = 6; } }
+    private static class TC6 { public static int foo; static { foo = 7; } }
+    private static class TC7 { public static int foo; static { foo = 8; } }
+    private static class TC8 { public static int foo; static { foo = 9; } }
+    private static class TC9 { public static int foo; static { foo = 10; } }
+
+    public ClassLoadObject() {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+    }
+
+    public int getBaseCallCount() {
+      return baseCallCnt;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      if (curClass == 0) {
+        $noprecompile$calledFunction0();
+      } else if (curClass == 1) {
+        $noprecompile$calledFunction1();
+      } else if (curClass == 2) {
+        $noprecompile$calledFunction2();
+      } else if (curClass == 3) {
+        $noprecompile$calledFunction3();
+      } else if (curClass == 4) {
+        $noprecompile$calledFunction4();
+      } else if (curClass == 5) {
+        $noprecompile$calledFunction5();
+      } else if (curClass == 6) {
+        $noprecompile$calledFunction6();
+      } else if (curClass == 7) {
+        $noprecompile$calledFunction7();
+      } else if (curClass == 8) {
+        $noprecompile$calledFunction8();
+      } else if (curClass == 9) {
+        $noprecompile$calledFunction9();
+      }
+      curClass++;
+    }
+
+    public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("jnoprecompile$calledFunction" + curClass);
+    }
+
+    // Give these all a tag to prevent 1954 from compiling them (and loading the class as a
+    // consequence).
+    public void $noprecompile$calledFunction0() {
+      cnt++;
+      System.out.println("TC0.foo == " + TC0.foo);
+    }
+
+    public void $noprecompile$calledFunction1() {
+      cnt++;
+      System.out.println("TC1.foo == " + TC1.foo);
+    }
+
+    public void $noprecompile$calledFunction2() {
+      cnt++;
+      System.out.println("TC2.foo == " + TC2.foo);
+    }
+
+    public void $noprecompile$calledFunction3() {
+      cnt++;
+      System.out.println("TC3.foo == " + TC3.foo);
+    }
+
+    public void $noprecompile$calledFunction4() {
+      cnt++;
+      System.out.println("TC4.foo == " + TC4.foo);
+    }
+
+    public void $noprecompile$calledFunction5() {
+      cnt++;
+      System.out.println("TC5.foo == " + TC5.foo);
+    }
+
+    public void $noprecompile$calledFunction6() {
+      cnt++;
+      System.out.println("TC6.foo == " + TC6.foo);
+    }
+
+    public void $noprecompile$calledFunction7() {
+      cnt++;
+      System.out.println("TC7.foo == " + TC7.foo);
+    }
+
+    public void $noprecompile$calledFunction8() {
+      cnt++;
+      System.out.println("TC8.foo == " + TC8.foo);
+    }
+
+    public void $noprecompile$calledFunction9() {
+      cnt++;
+      System.out.println("TC9.foo == " + TC9.foo);
+    }
+
+    public String toString() {
+      return "ClassLoadObject { cnt: " + cnt + ", curClass: " + curClass + "}";
+    }
+  }
+
+  public static class FieldBasedTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+    public int TARGET_FIELD;
+    public FieldBasedTestObject() {
+      super();
+      cnt = 0;
+      TARGET_FIELD = 0;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      // We put a watchpoint here and PopFrame when we are at it.
+      TARGET_FIELD += 10;
+      if (cnt == 1) { System.out.println("FAILED: No pop on first call!"); }
+    }
+
+    public String toString() {
+      return "FieldBasedTestObject { cnt: " + cnt + ", TARGET_FIELD: " + TARGET_FIELD + " }";
+    }
+  }
+
+  public static class StandardTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+    public final boolean check;
+
+    public StandardTestObject(boolean check) {
+      super();
+      cnt = 0;
+      this.check = check;
+    }
+
+    public StandardTestObject() {
+      this(true);
+    }
+
+    public void calledFunction() {
+      cnt++;       // line +0
+      // We put a breakpoint here and PopFrame when we are at it.
+      doNothing(); // line +2
+      if (check && cnt == 1) { System.out.println("FAILED: No pop on first call!"); }
+    }
+
+    public String toString() {
+      return "StandardTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SynchronizedFunctionTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+
+    public SynchronizedFunctionTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public synchronized void calledFunction() {
+      cnt++;               // line +0
+      // We put a breakpoint here and PopFrame when we are at it.
+      doNothing();         // line +2
+    }
+
+    public String toString() {
+      return "SynchronizedFunctionTestObject { cnt: " + cnt + " }";
+    }
+  }
+  public static class SynchronizedTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+    public final Object lock;
+
+    public SynchronizedTestObject() {
+      super();
+      cnt = 0;
+      lock = new Object();
+    }
+
+    public void calledFunction() {
+      synchronized (lock) {  // line +0
+        cnt++;               // line +1
+        // We put a breakpoint here and PopFrame when we are at it.
+        doNothing();         // line +3
+      }
+    }
+
+    public String toString() {
+      return "SynchronizedTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionCatchTestObject extends AbstractTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public ExceptionCatchTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      try {
+        doThrow();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " caught in called function.");
+      }
+    }
+
+    public void doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionCatchTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionThrowFarTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public ExceptionThrowFarTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public int getBaseCallCount() {
+      return baseCallCnt;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        callingFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+    }
+
+    public void callingFunction() {
+      calledFunction();
+    }
+    public void calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+        }
+      } else {
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public Method getCallingMethod() throws Exception {
+      return this.getClass().getMethod("callingFunction");
+    }
+
+    public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("calledFunction");
+    }
+
+    public String toString() {
+      return "ExceptionThrowFarTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionOnceObject extends AbstractTestObject {
+    public static final class TestError extends Error {}
+    public int cnt;
+    public final boolean throwInSub;
+    public ExceptionOnceObject(boolean throwInSub) {
+      super();
+      cnt = 0;
+      this.throwInSub = throwInSub;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      if (cnt == 1) {
+        if (throwInSub) {
+          doThrow();
+        } else {
+          throw new TestError();
+        }
+      }
+    }
+
+    public void doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionOnceObject { cnt: " + cnt + ", throwInSub: " + throwInSub + " }";
+    }
+  }
+
+  public static class ExceptionThrowTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public ExceptionThrowTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public int getBaseCallCount() {
+      return baseCallCnt;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        calledFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+    }
+
+    public void calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+        }
+      } else {
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("calledFunction");
+    }
+
+    public String toString() {
+      return "ExceptionThrowTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class NativeCalledObject extends AbstractTestObject {
+    public int cnt = 0;
+
+    public native void calledFunction();
+
+    public String toString() {
+      return "NativeCalledObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class NativeCallerObject implements TestRunnable {
+    public int baseCnt = 0;
+    public int cnt = 0;
+
+    public int getBaseCallCount() {
+      return baseCnt;
+    }
+
+    public native void run();
+
+    public void calledFunction() {
+      cnt++;
+      // We will stop using a MethodExit event.
+    }
+
+    public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("calledFunction");
+    }
+
+    public String toString() {
+      return "NativeCallerObject { cnt: " + cnt + " }";
+    }
+  }
+  public static class SuspendSuddenlyObject extends AbstractTestObject {
+    public volatile boolean stop_spinning = false;
+    public volatile boolean is_spinning = false;
+    public int cnt = 0;
+
+    public void calledFunction() {
+      cnt++;
+      while (!stop_spinning) {
+        is_spinning = true;
+      }
+    }
+
+    public String toString() {
+      return "SuspendSuddenlyObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static void run(boolean canRunClassLoadTests) throws Exception {
+    new Test1953(canRunClassLoadTests, (x)-> {}).runTests();
+  }
+
+  // This entrypoint is used by CTS only. */
+  public static void run() throws Exception {
+    /* TODO: Due to the way that CTS tests are verified we cannot run class-load-tests since the
+     *       verifier will be delayed until runtime and then load the classes all at once. This
+     *       makes the test impossible to run.
+     */
+    run(/*canRunClassLoadTests*/ false);
+  }
+
+  public Test1953(boolean canRunClassLoadTests, Consumer<TestRunnable> preTest) {
+    this.canRunClassLoadTests = canRunClassLoadTests;
+    this.preTest = preTest;
+  }
+
+  private Consumer<TestRunnable> preTest;
+
+  public void runTests() throws Exception {
+    setupTest();
+
+    final Method calledFunction = StandardTestObject.class.getDeclaredMethod("calledFunction");
+    final Method doNothingMethod = Test1953.class.getDeclaredMethod("doNothing");
+    // Add a breakpoint on the second line after the start of the function
+    final int line = Breakpoint.locationToLine(calledFunction, 0) + 2;
+    final long loc = Breakpoint.lineToLocation(calledFunction, line);
+    System.out.println("Test stopped using breakpoint");
+    runTestOn(new StandardTestObject(),
+        (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
+        Test1953::clearSuspendBreakpointFor);
+
+    final Method syncFunctionCalledFunction =
+        SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    // Annoyingly r8 generally has the first instruction (a monitor enter) not be marked as being
+    // on any line but javac has it marked as being on the first line of the function. Just use the
+    // second entry on the line-number table to get the breakpoint. This should be good for both.
+    final long syncFunctionLoc =
+        Breakpoint.getLineNumberTable(syncFunctionCalledFunction)[1].location;
+    System.out.println("Test stopped using breakpoint with declared synchronized function");
+    runTestOn(new SynchronizedFunctionTestObject(),
+        (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
+        Test1953::clearSuspendBreakpointFor);
+
+    final Method syncCalledFunction =
+        SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int syncLine = Breakpoint.locationToLine(syncCalledFunction, 0) + 3;
+    final long syncLoc = Breakpoint.lineToLocation(syncCalledFunction, syncLine);
+    System.out.println("Test stopped using breakpoint with synchronized block");
+    runTestOn(new SynchronizedTestObject(),
+        (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
+        Test1953::clearSuspendBreakpointFor);
+
+    System.out.println("Test stopped on single step");
+    runTestOn(new StandardTestObject(),
+        (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
+        Test1953::clearSuspendSingleStepFor);
+
+    final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
+    System.out.println("Test stopped on field access");
+    runTestOn(new FieldBasedTestObject(),
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
+        Test1953::clearFieldSuspendFor);
+
+    System.out.println("Test stopped on field modification");
+    runTestOn(new FieldBasedTestObject(),
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
+        Test1953::clearFieldSuspendFor);
+
+    System.out.println("Test stopped during Method Exit of doNothing");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ false, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    // NB We need another test to make sure the MethodEntered event is triggered twice.
+    System.out.println("Test stopped during Method Enter of doNothing");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ true, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Exit of calledFunction");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ false, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Enter of calledFunction");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ true, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    final Method exceptionOnceCalledMethod =
+        ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during Method Exit due to exception thrown in same function");
+    runTestOn(new ExceptionOnceObject(/*throwInSub*/ false),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
+    runTestOn(new ExceptionOnceObject(/*throwInSub*/ true),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
+        Test1953::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during notifyFramePop without exception on pop of calledFunction");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(1, doNothingMethod, thr),
+        Test1953::clearSuspendPopFrameEvent);
+
+    System.out.println("Test stopped during notifyFramePop without exception on pop of doNothing");
+    runTestOn(new StandardTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(0, doNothingMethod, thr),
+        Test1953::clearSuspendPopFrameEvent);
+
+    final Method exceptionThrowCalledMethod =
+        ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during notifyFramePop with exception on pop of calledFunction");
+    runTestOn(new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
+        Test1953::clearSuspendPopFrameEvent);
+
+    final Method exceptionCatchThrowMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
+    System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
+    runTestOn(new ExceptionCatchTestObject(),
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
+        Test1953::clearSuspendPopFrameEvent);
+
+    System.out.println("Test stopped during ExceptionCatch event of calledFunction " +
+        "(catch in called function, throw in called function)");
+    runTestOn(new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ true, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    final Method exceptionCatchCalledMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during ExceptionCatch event of calledFunction " +
+        "(catch in called function, throw in subroutine)");
+    runTestOn(new ExceptionCatchTestObject(),
+        (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /*catch*/ true, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during Exception event of calledFunction " +
+        "(catch in calling function)");
+    runTestOn(new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during Exception event of calledFunction " +
+        "(catch in called function)");
+    runTestOn(new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    final Method exceptionThrowFarCalledMethod =
+        ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during Exception event of calledFunction " +
+        "(catch in parent of calling function)");
+    runTestOn(new ExceptionThrowFarTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during Exception event of calledFunction " +
+        "(catch in called function)");
+    runTestOn(new ExceptionThrowFarTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
+        Test1953::clearSuspendExceptionEvent);
+
+    // These tests are disabled for either the RI (b/116003018) or for jvmti-stress. For the
+    // latter it is due to the additional agent causing classes to be loaded earlier as it forces
+    // deeper verification during class redefinition, causing failures.
+    // NB the agent is prevented from popping frames in either of these events in ART. See
+    // b/117615146 for more information about this restriction.
+    if (canRunClassLoadTests && CanRunClassLoadingTests()) {
+      // This test doesn't work on RI since the RI disallows use of PopFrame during a ClassLoad
+      // event. See b/116003018 for more information.
+      System.out.println("Test stopped during a ClassLoad event.");
+      runTestOn(new ClassLoadObject(),
+          (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_LOAD, ClassLoadObject.CLASS_NAMES, thr),
+          Test1953::clearSuspendClassEvent);
+
+      // The RI handles a PopFrame during a ClassPrepare event incorrectly. See b/116003018 for
+      // more information.
+      System.out.println("Test stopped during a ClassPrepare event.");
+      runTestOn(new ClassLoadObject(),
+          (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_PREPARE,
+                                          ClassLoadObject.CLASS_NAMES,
+                                          thr),
+          Test1953::clearSuspendClassEvent);
+    }
+    System.out.println("Test stopped during random Suspend.");
+    final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
+    runTestOn(
+        sso,
+        new TestSuspender() {
+          public void setup(Thread thr) { }
+          public void waitForSuspend(Thread thr) {
+            while (!sso.is_spinning) {}
+            Suspension.suspend(thr);
+          }
+          public void cleanup(Thread thr) {
+            sso.stop_spinning = true;
+          }
+        });
+
+    final Method redefineCalledFunction =
+       RedefineTestObject.class.getDeclaredMethod("calledFunction");
+    final int redefLine = Breakpoint.locationToLine(redefineCalledFunction, 0) + 2;
+    final long redefLoc = Breakpoint.lineToLocation(redefineCalledFunction, redefLine);
+    System.out.println("Test redefining frame being popped.");
+    runTestOn(new RedefineTestObject(),
+        (thr) -> setupSuspendBreakpointFor(redefineCalledFunction, redefLoc, thr),
+        (thr) -> {
+          clearSuspendBreakpointFor(thr);
+          Redefinition.doCommonClassRedefinition(RedefineTestObject.class,
+                                                 RedefineTestObject.CLASS_BYTES,
+                                                 RedefineTestObject.DEX_BYTES);
+        });
+
+    System.out.println("Test stopped during a native method fails");
+    runTestOn(new NativeCalledObject(),
+        Test1953::setupWaitForNativeCall,
+        Test1953::clearWaitForNativeCall);
+
+    System.out.println("Test stopped in a method called by native fails");
+    final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
+    runTestOn(new NativeCallerObject(),
+        (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /*enter*/ false, thr),
+        Test1953::clearSuspendMethodEvent);
+  }
+
+  // Volatile is to prevent any future optimizations that could invalidate this test by doing
+  // constant propagation and eliminating the failing paths before the verifier is able to load the
+  // class.
+  static volatile boolean ranClassLoadTest = false;
+  static boolean classesPreverified = false;
+  private static final class RCLT0 { public void foo() {} }
+  private static final class RCLT1 { public void foo() {} }
+  // If classes are not preverified for some reason (interp-ac, no-image, etc) the verifier will
+  // actually load classes as it runs. This means that we cannot use the class-load tests as they
+  // are written. TODO Support this.
+  public boolean CanRunClassLoadingTests() {
+    if (ranClassLoadTest) {
+      return classesPreverified;
+    }
+    if (!ranClassLoadTest) {
+      // Only this will ever be executed.
+      new RCLT0().foo();
+    } else {
+      // This will never be executed. If classes are not preverified the verifier will load RCLT1
+      // when the enclosing method is run. This behavior makes the class-load/prepare test cases
+      // impossible to successfully run (they will deadlock).
+      new RCLT1().foo();
+      System.out.println("FAILURE: UNREACHABLE Location!");
+    }
+    classesPreverified = !isClassLoaded("Lart/Test1953$RCLT1;");
+    ranClassLoadTest = true;
+    return classesPreverified;
+  }
+
+  public static native boolean isClassLoaded(String name);
+
+  public static native void setupTest();
+  public static native void popFrame(Thread thr);
+
+  public static native void setupSuspendBreakpointFor(Executable meth, long loc, Thread thr);
+  public static native void clearSuspendBreakpointFor(Thread thr);
+
+  public static native void setupSuspendSingleStepAt(Executable meth, long loc, Thread thr);
+  public static native void clearSuspendSingleStepFor(Thread thr);
+
+  public static native void setupFieldSuspendFor(Class klass, Field f, boolean access, Thread thr);
+  public static native void clearFieldSuspendFor(Thread thr);
+
+  public static native void setupSuspendMethodEvent(Executable meth, boolean enter, Thread thr);
+  public static native void clearSuspendMethodEvent(Thread thr);
+
+  public static native void setupSuspendExceptionEvent(
+      Executable meth, boolean is_catch, Thread thr);
+  public static native void clearSuspendExceptionEvent(Thread thr);
+
+  public static native void setupSuspendPopFrameEvent(
+      int offset, Executable breakpointFunction, Thread thr);
+  public static native void clearSuspendPopFrameEvent(Thread thr);
+
+  public static final int EVENT_TYPE_CLASS_LOAD = 55;
+  public static final int EVENT_TYPE_CLASS_PREPARE = 56;
+  public static native void setupSuspendClassEvent(
+      int eventType, String[] interestingNames, Thread thr);
+  public static native void clearSuspendClassEvent(Thread thr);
+
+  public static native void setupWaitForNativeCall(Thread thr);
+  public static native void clearWaitForNativeCall(Thread thr);
+
+  public static native void waitForSuspendHit(Thread thr);
+}
diff --git a/test/1954-pop-frame-jit/check b/test/1954-pop-frame-jit/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1954-pop-frame-jit/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+  (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1954-pop-frame-jit/expected.txt b/test/1954-pop-frame-jit/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1954-pop-frame-jit/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1954-pop-frame-jit/info.txt b/test/1954-pop-frame-jit/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1954-pop-frame-jit/info.txt
@@ -0,0 +1,7 @@
+Test JVMTI PopFrame behavior when the methods involved are JIT compiled.
+
+This test suspends a target thread in a wide variety of ways (breakpoints,
+single-stepping, field access/modification watches, method entry/exit,
+exception and class-load events, or plain suspension), pops the current
+frame with PopFrame, and checks that the popped method is re-executed with
+the expected side effects after its methods have been JIT compiled.
diff --git a/test/1954-pop-frame-jit/jvm-expected.patch b/test/1954-pop-frame-jit/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1954-pop-frame-jit/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1954-pop-frame-jit/run b/test/1954-pop-frame-jit/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1954-pop-frame-jit/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1954-pop-frame-jit/src/Main.java b/test/1954-pop-frame-jit/src/Main.java
new file mode 100644
index 0000000..12defcd
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+  public Main(boolean run_class_load_tests) {
+    super(run_class_load_tests, (testObj) -> {
+      try {
+        // Make sure everything is jitted in the method. We do this before calling setup since the
+        // suspend setup might make it impossible to jit the methods (by setting breakpoints or
+        // something).
+        for (Method m : testObj.getClass().getMethods()) {
+          if ((m.getModifiers() & Modifier.NATIVE) == 0 &&
+              !m.getName().startsWith("$noprecompile$")) {
+            ensureMethodJitCompiled(m);
+          }
+        }
+      } catch (Exception e) {}
+    });
+  }
+
+  public static void main(String[] args) throws Exception {
+    new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+  }
+
+  public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1954-pop-frame-jit/src/art/Breakpoint.java b/test/1954-pop-frame-jit/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
diff --git a/test/1954-pop-frame-jit/src/art/Redefinition.java b/test/1954-pop-frame-jit/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1954-pop-frame-jit/src/art/StackTrace.java b/test/1954-pop-frame-jit/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+  public static class StackFrameData {
+    public final Thread thr;
+    public final Executable method;
+    public final long current_location;
+    public final int depth;
+
+    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+      this.thr = thr;
+      this.method = e;
+      this.current_location = loc;
+      this.depth = depth;
+    }
+    @Override
+    public String toString() {
+      return String.format(
+          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+          this.thr,
+          this.method,
+          this.current_location,
+          this.depth);
+    }
+  }
+
+  public static native int GetStackDepth(Thread thr);
+
+  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+  public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says that not being suspended is fine but since we want this to be
+    // consistent we will suspend for the RI.
+    boolean suspend_thread =
+        !System.getProperty("java.vm.name").equals("Dalvik") &&
+        !thr.equals(Thread.currentThread()) &&
+        !Suspension.isSuspended(thr);
+    if (suspend_thread) {
+      Suspension.suspend(thr);
+    }
+    StackFrameData[] out = nativeGetStackTrace(thr);
+    if (suspend_thread) {
+      Suspension.resume(thr);
+    }
+    return out;
+  }
+}
+
diff --git a/test/1954-pop-frame-jit/src/art/Suspension.java b/test/1954-pop-frame-jit/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+  // Suspends a thread using jvmti.
+  public native static void suspend(Thread thr);
+
+  // Resumes a thread using jvmti.
+  public native static void resume(Thread thr);
+
+  public native static boolean isSuspended(Thread thr);
+
+  public native static int[] suspendList(Thread... threads);
+  public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1954-pop-frame-jit/src/art/Test1953.java b/test/1954-pop-frame-jit/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/check b/test/1955-pop-frame-jit-called/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+  (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1955-pop-frame-jit-called/expected.txt b/test/1955-pop-frame-jit-called/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1955-pop-frame-jit-called/info.txt b/test/1955-pop-frame-jit-called/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/info.txt
@@ -0,0 +1,7 @@
+Test basic JVMTI breakpoint functionality.
+
+This test places a breakpoint on the first instruction of a number of functions
+that are entered in every way possible for the given class of method.
+
+It also tests that breakpoints don't interfere with each other by having
+multiple breakpoints be set at once.
diff --git a/test/1955-pop-frame-jit-called/jvm-expected.patch b/test/1955-pop-frame-jit-called/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1955-pop-frame-jit-called/run b/test/1955-pop-frame-jit-called/run
new file mode 100755
index 0000000..2984461
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/run
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+# The jitthreshold prevents the jit from compiling anything except those which
+# we explicitly request.
+./default-run "$@" --android-runtime-option -Xjitthreshold:1000 --jvmti $ARGS
diff --git a/test/1955-pop-frame-jit-called/src/Main.java b/test/1955-pop-frame-jit-called/src/Main.java
new file mode 100644
index 0000000..30a42ea
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+  public Main(boolean run_class_load_tests) {
+    super(run_class_load_tests, (testObj) -> {
+      try {
+        // Make sure the called method is jitted
+        ensureMethodJitCompiled(testObj.getCalledMethod());
+      } catch (Exception e) {}
+    });
+  }
+
+  public static void main(String[] args) throws Exception {
+    new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+  }
+
+  public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/Breakpoint.java b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
diff --git a/test/1955-pop-frame-jit-called/src/art/Redefinition.java b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/StackTrace.java b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+  public static class StackFrameData {
+    public final Thread thr;
+    public final Executable method;
+    public final long current_location;
+    public final int depth;
+
+    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+      this.thr = thr;
+      this.method = e;
+      this.current_location = loc;
+      this.depth = depth;
+    }
+    @Override
+    public String toString() {
+      return String.format(
+          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+          this.thr,
+          this.method,
+          this.current_location,
+          this.depth);
+    }
+  }
+
+  public static native int GetStackDepth(Thread thr);
+
+  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+  public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says that not being suspended is fine but since we want this to be
+    // consistent we will suspend for the RI.
+    boolean suspend_thread =
+        !System.getProperty("java.vm.name").equals("Dalvik") &&
+        !thr.equals(Thread.currentThread()) &&
+        !Suspension.isSuspended(thr);
+    if (suspend_thread) {
+      Suspension.suspend(thr);
+    }
+    StackFrameData[] out = nativeGetStackTrace(thr);
+    if (suspend_thread) {
+      Suspension.resume(thr);
+    }
+    return out;
+  }
+}
+
diff --git a/test/1955-pop-frame-jit-called/src/art/Suspension.java b/test/1955-pop-frame-jit-called/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+  // Suspends a thread using jvmti.
+  public native static void suspend(Thread thr);
+
+  // Resumes a thread using jvmti.
+  public native static void resume(Thread thr);
+
+  public native static boolean isSuspended(Thread thr);
+
+  public native static int[] suspendList(Thread... threads);
+  public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/Test1953.java b/test/1955-pop-frame-jit-called/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/check b/test/1956-pop-frame-jit-calling/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+  (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1956-pop-frame-jit-calling/expected.txt b/test/1956-pop-frame-jit-calling/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.Test1953.popFrame(Native Method)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTestOn(Test1953.java)
+	art.Test1953.runTests(Test1953.java)
+	<Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1956-pop-frame-jit-calling/info.txt b/test/1956-pop-frame-jit-calling/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/info.txt
@@ -0,0 +1,7 @@
+Test JVMTI PopFrame functionality when the method calling the popped frame
+has been jit-compiled.
+
+This test stops the target method in every supported way (breakpoints,
+single-step, field watches, method entry/exit, exceptions, frame-pop,
+class-load/prepare, suspension, redefinition, and native frames), pops the
+frame, and checks that the function is re-executed exactly once more.
diff --git a/test/1956-pop-frame-jit-calling/jvm-expected.patch b/test/1956-pop-frame-jit-calling/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< 	art.Test1953.popFrame(Native Method)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTestOn(Test1953.java)
+< 	art.Test1953.runTests(Test1953.java)
+< 	<Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1956-pop-frame-jit-calling/run b/test/1956-pop-frame-jit-calling/run
new file mode 100755
index 0000000..2984461
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/run
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+# The jitthreshold prevents the jit from compiling anything except those which
+# we explicitly request.
+./default-run "$@" --android-runtime-option -Xjitthreshold:1000 --jvmti $ARGS
diff --git a/test/1956-pop-frame-jit-calling/src/Main.java b/test/1956-pop-frame-jit-calling/src/Main.java
new file mode 100644
index 0000000..c44e035
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+  public Main(boolean run_class_load_tests) {
+    super(run_class_load_tests, (testObj) -> {
+      try {
+        // Make sure the calling method is jitted
+        ensureMethodJitCompiled(testObj.getCallingMethod());
+      } catch (Exception e) {}
+    });
+  }
+
+  public static void main(String[] args) throws Exception {
+    new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+  }
+
+  public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
diff --git a/test/1956-pop-frame-jit-calling/src/art/Redefinition.java b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1956-pop-frame-jit-calling/src/art/StackTrace.java b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+  public static class StackFrameData {
+    public final Thread thr;
+    public final Executable method;
+    public final long current_location;
+    public final int depth;
+
+    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+      this.thr = thr;
+      this.method = e;
+      this.current_location = loc;
+      this.depth = depth;
+    }
+    @Override
+    public String toString() {
+      return String.format(
+          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+          this.thr,
+          this.method,
+          this.current_location,
+          this.depth);
+    }
+  }
+
+  public static native int GetStackDepth(Thread thr);
+
+  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+  public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says that not being suspended is fine but since we want this to be
+    // consistent we will suspend for the RI.
+    boolean suspend_thread =
+        !System.getProperty("java.vm.name").equals("Dalvik") &&
+        !thr.equals(Thread.currentThread()) &&
+        !Suspension.isSuspended(thr);
+    if (suspend_thread) {
+      Suspension.suspend(thr);
+    }
+    StackFrameData[] out = nativeGetStackTrace(thr);
+    if (suspend_thread) {
+      Suspension.resume(thr);
+    }
+    return out;
+  }
+}
+
diff --git a/test/1956-pop-frame-jit-calling/src/art/Suspension.java b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+  // Suspends a thread using jvmti.
+  public native static void suspend(Thread thr);
+
+  // Resumes a thread using jvmti.
+  public native static void resume(Thread thr);
+
+  public native static boolean isSuspended(Thread thr);
+
+  public native static int[] suspendList(Thread... threads);
+  public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1956-pop-frame-jit-calling/src/art/Test1953.java b/test/1956-pop-frame-jit-calling/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/1957-error-ext/expected.txt b/test/1957-error-ext/expected.txt
new file mode 100644
index 0000000..bfe7033
--- /dev/null
+++ b/test/1957-error-ext/expected.txt
@@ -0,0 +1,4 @@
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
+Got class java.lang.Exception: Failed to redefine class <Lart/Test1957$Transform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED
+LastError is: FAILURE TO REDEFINE Unable to perform redefinition of 'Lart/Test1957$Transform;': Total number of declared methods changed from 2 to 1
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
diff --git a/test/1957-error-ext/info.txt b/test/1957-error-ext/info.txt
new file mode 100644
index 0000000..ef772d9
--- /dev/null
+++ b/test/1957-error-ext/info.txt
@@ -0,0 +1 @@
+Test for get_last_error_message extension function.
diff --git a/test/1957-error-ext/lasterror.cc b/test/1957-error-ext/lasterror.cc
new file mode 100644
index 0000000..5aa3fbe
--- /dev/null
+++ b/test/1957-error-ext/lasterror.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1957ErrorExt {
+
+using GetLastError = jvmtiError(*)(jvmtiEnv* env, char** msg);
+using ClearLastError = jvmtiError(*)(jvmtiEnv* env);
+
+template <typename T>
+static void Dealloc(T* t) {
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename ...Rest>
+static void Dealloc(T* t, Rest... rs) {
+  Dealloc(t);
+  Dealloc(rs...);
+}
+
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(params[i].name);
+  }
+}
+
+static jvmtiExtensionFunction FindExtensionMethod(JNIEnv* env, const std::string& name) {
+  jint n_ext;
+  jvmtiExtensionFunctionInfo* infos;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  jvmtiExtensionFunction res = nullptr;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+    if (strcmp(name.c_str(), cur_info->id) == 0) {
+      res = cur_info->func;
+    }
+    // Cleanup the cur_info
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+  }
+  // Cleanup the array.
+  Dealloc(infos);
+  if (res == nullptr) {
+    ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+    env->ThrowNew(rt_exception.get(), (name + " extensions not found").c_str());
+    return nullptr;
+  }
+  return res;
+}
+
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test1957_getLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  GetLastError get_last_error = reinterpret_cast<GetLastError>(
+      FindExtensionMethod(env, "com.android.art.misc.get_last_error_message"));
+  if (get_last_error == nullptr) {
+    return nullptr;
+  }
+  char* msg;
+  if (JvmtiErrorToException(env, jvmti_env, get_last_error(jvmti_env, &msg))) {
+    return nullptr;
+  }
+
+  return env->NewStringUTF(msg);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1957_clearLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  ClearLastError clear_last_error = reinterpret_cast<ClearLastError>(
+      FindExtensionMethod(env, "com.android.art.misc.clear_last_error_message"));
+  if (clear_last_error == nullptr) {
+    return;
+  }
+  JvmtiErrorToException(env, jvmti_env, clear_last_error(jvmti_env));
+}
+
+}  // namespace Test1957ErrorExt
+}  // namespace art
diff --git a/test/1957-error-ext/run b/test/1957-error-ext/run
new file mode 100755
index 0000000..8be0ed4
--- /dev/null
+++ b/test/1957-error-ext/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+./default-run "$@" --jvmti
diff --git a/test/1957-error-ext/src/Main.java b/test/1957-error-ext/src/Main.java
new file mode 100644
index 0000000..7e5e075
--- /dev/null
+++ b/test/1957-error-ext/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test entry point; delegates to the actual test logic in art.Test1957.
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1957.run();
+  }
+}
diff --git a/test/1957-error-ext/src/art/Redefinition.java b/test/1957-error-ext/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1957-error-ext/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  // Immutable triple of a target class plus the class-file and dex-file bytes
+  // it should be redefined to.
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  // Forwards the chosen configuration's integer value to the native agent.
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  // Convenience wrapper: unzips the definitions into the parallel arrays
+  // expected by doCommonMultiClassRedefinition.
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  // Registers each definition as an expected transformation result, keyed by
+  // the target's canonical class name.
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1957-error-ext/src/art/Test1957.java b/test/1957-error-ext/src/art/Test1957.java
new file mode 100644
index 0000000..ffb68be
--- /dev/null
+++ b/test/1957-error-ext/src/art/Test1957.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1957 {
+
+  static class Transform {
+    public void sayHi() {
+      // Use lower 'h' to make sure the string will have a different string id
+      // than the transformation (the transformation code is the same except
+      // the actual printed String, which was making the test inaccurately passing
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+      // We know the string ids will be different because lexicographically:
+      // "Goodbye" < "LTransform;" < "hello".
+      System.out.println("hello");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform {
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADUAEQoAAwAKBwAMBwAPAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1iZXJU" +
+    "YWJsZQEAClNvdXJjZUZpbGUBAA1UZXN0MTk1Ny5qYXZhDAAEAAUHABABABZhcnQvVGVzdDE5NTck" +
+    "VHJhbnNmb3JtAQAJVHJhbnNmb3JtAQAMSW5uZXJDbGFzc2VzAQAQamF2YS9sYW5nL09iamVjdAEA" +
+    "DGFydC9UZXN0MTk1NwAgAAIAAwAAAAAAAQAAAAQABQABAAYAAAAdAAEAAQAAAAUqtwABsQAAAAEA" +
+    "BwAAAAYAAQAAAAYAAgAIAAAAAgAJAA4AAAAKAAEAAgALAA0ACA==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQAQiK+oahCb4T18bDge0pSvp7rka4UQ2AY0AwAAcAAAAHhWNBIAAAAAAAAAAIgCAAAN" +
+    "AAAAcAAAAAYAAACkAAAAAQAAALwAAAAAAAAAAAAAAAIAAADIAAAAAQAAANgAAAA8AgAA+AAAABQB" +
+    "AAAcAQAANgEAAEYBAABqAQAAigEAAJ4BAACtAQAAuAEAALsBAADIAQAAzgEAANUBAAABAAAAAgAA" +
+    "AAMAAAAEAAAABQAAAAgAAAAIAAAABQAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAA" +
+    "AAAAAAYAAAB4AgAAWwIAAAAAAAABAAEAAQAAABABAAAEAAAAcBABAAAADgAGAA4ABjxpbml0PgAY" +
+    "TGFydC9UZXN0MTk1NyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTU3OwAiTGRhbHZpay9hbm5vdGF0" +
+    "aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2" +
+    "YS9sYW5nL09iamVjdDsADVRlc3QxOTU3LmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MA" +
+    "BG5hbWUABXZhbHVlAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjox" +
+    "LCJzaGEtMSI6Ijg0NjI2ZDE0MmRiMmY4NzVhY2E2YjVlOWVmYWU3OThjYWQ5ZDlhNTAiLCJ2ZXJz" +
+    "aW9uIjoiMS40LjItZGV2In0AAgIBCxgBAgMCCQQIChcHAAABAACAgAT4AQAAAAAAAAACAAAATAIA" +
+    "AFICAABsAgAAAAAAAAAAAAAAAAAADgAAAAAAAAABAAAAAAAAAAEAAAANAAAAcAAAAAIAAAAGAAAA" +
+    "pAAAAAMAAAABAAAAvAAAAAUAAAACAAAAyAAAAAYAAAABAAAA2AAAAAEgAAABAAAA+AAAAAMgAAAB" +
+    "AAAAEAEAAAIgAAANAAAAFAEAAAQgAAACAAAATAIAAAAgAAABAAAAWwIAAAMQAAACAAAAaAIAAAYg" +
+    "AAABAAAAeAIAAAAQAAABAAAAiAIAAA==");
+
+  // Prints the last-error state before a (deliberately failing) redefinition,
+  // after it, and again after clearing it, to exercise the error-message
+  // extension functions.
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    Transform t = new Transform();
+    System.out.println("LastError is: " + getLastErrorOrException());
+    try {
+      Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Throwable e) {
+      System.out.println("Got " + e.getClass().toString() + ": " + e.getMessage());
+    }
+    System.out.println("LastError is: " + getLastErrorOrException());
+    clearLastError();
+    System.out.println("LastError is: " + getLastErrorOrException());
+  }
+
+  // Wraps getLastError so that a thrown error is reported as a printable
+  // string instead of propagating out of run().
+  public static String getLastErrorOrException() {
+    try {
+      return getLastError();
+    } catch (Throwable t) {
+      return "<call returned error: " + t.getClass().toString() + ": " + t.getMessage() + ">";
+    }
+  }
+  public static native String getLastError();
+  public static native void clearLastError();
+}
diff --git a/test/1958-transform-try-jit/expected.txt b/test/1958-transform-try-jit/expected.txt
new file mode 100644
index 0000000..8cfaea2
--- /dev/null
+++ b/test/1958-transform-try-jit/expected.txt
@@ -0,0 +1,2 @@
+Before redefinition: hello
+After redefinition: Goodbye
diff --git a/test/1958-transform-try-jit/info.txt b/test/1958-transform-try-jit/info.txt
new file mode 100644
index 0000000..0bf5ee5
--- /dev/null
+++ b/test/1958-transform-try-jit/info.txt
@@ -0,0 +1,5 @@
+Tests that JVMTI transformation seems to work even when we try to get a method
+inlined by the jit.
+
+Note this test deliberately avoids any internal libart calls so it can be
+included in CTS.
diff --git a/test/1958-transform-try-jit/run b/test/1958-transform-try-jit/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1958-transform-try-jit/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1958-transform-try-jit/src/Main.java b/test/1958-transform-try-jit/src/Main.java
new file mode 100644
index 0000000..7f45c20
--- /dev/null
+++ b/test/1958-transform-try-jit/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test entry point; delegates to the actual test logic in art.Test1958.
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1958.run();
+  }
+}
diff --git a/test/1958-transform-try-jit/src/art/Redefinition.java b/test/1958-transform-try-jit/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1958-transform-try-jit/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  // Immutable triple of a target class plus the class-file and dex-file bytes
+  // it should be redefined to.
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  // Forwards the chosen configuration's integer value to the native agent.
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  // Convenience wrapper: unzips the definitions into the parallel arrays
+  // expected by doCommonMultiClassRedefinition.
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  // Registers each definition as an expected transformation result, keyed by
+  // the target's canonical class name.
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1958-transform-try-jit/src/art/Test1958.java b/test/1958-transform-try-jit/src/art/Test1958.java
new file mode 100644
index 0000000..100daa1
--- /dev/null
+++ b/test/1958-transform-try-jit/src/art/Test1958.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+import java.util.Base64;
+
+public class Test1958 {
+  static class Runner {
+    public static String doSayHi() {
+      return sayHiHelper(true);
+    }
+
+    public static String dontSayHi() {
+      return sayHiHelper(false);
+    }
+
+    // We are trying to get the definition of Transform.sayHi inlined into this function.
+    public static String sayHiHelper(boolean sayHi) {
+      if (sayHi) {
+        return Transform.sayHi();
+      } else {
+        return "NOPE!";
+      }
+    }
+  }
+
+  static class Transform {
+    public static String sayHi() {
+      // Use lower 'h' to make sure the string will have a different string id
+      // than the transformation (the transformation code is the same except
+      // the actual printed String, which was making the test inaccurately passing
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+      // We know the string ids will be different because lexicographically:
+      // "Goodbye" < "LTransform;" < "hello".
+      return "hello";
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * static class Transform {
+   *   public static String sayHi() {
+   *    return "Goodbye";
+   *   }
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADUAFQoABAANCAAOBwAQBwATAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1i" +
+    "ZXJUYWJsZQEABXNheUhpAQAUKClMamF2YS9sYW5nL1N0cmluZzsBAApTb3VyY2VGaWxlAQANVGVz" +
+    "dDE5NTguamF2YQwABQAGAQAHR29vZGJ5ZQcAFAEAFmFydC9UZXN0MTk1OCRUcmFuc2Zvcm0BAAlU" +
+    "cmFuc2Zvcm0BAAxJbm5lckNsYXNzZXMBABBqYXZhL2xhbmcvT2JqZWN0AQAMYXJ0L1Rlc3QxOTU4" +
+    "ACAAAwAEAAAAAAACAAAABQAGAAEABwAAAB0AAQABAAAABSq3AAGxAAAAAQAIAAAABgABAAAABgAJ" +
+    "AAkACgABAAcAAAAbAAEAAAAAAAMSArAAAAABAAgAAAAGAAEAAAAIAAIACwAAAAIADAASAAAACgAB" +
+    "AAMADwARAAg=");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQCRmEaLPLzpKe+CHcDM8YhIJCPWwcFR5yegAwAAcAAAAHhWNBIAAAAAAAAAAPQCAAAR" +
+    "AAAAcAAAAAcAAAC0AAAAAgAAANAAAAAAAAAAAAAAAAMAAADoAAAAAQAAAAABAACAAgAAIAEAAFgB" +
+    "AABgAQAAaQEAAGwBAACGAQAAlgEAALoBAADaAQAA7gEAAAICAAARAgAAHAIAAB8CAAAsAgAAMgIA" +
+    "ADkCAABAAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACwAAAAIAAAAFAAAAAAAAAAsAAAAGAAAA" +
+    "AAAAAAAAAQAAAAAAAAAAAA4AAAAEAAEAAAAAAAAAAAAAAAAABAAAAAAAAAAJAAAA5AIAAMYCAAAA" +
+    "AAAAAQAAAAAAAABUAQAAAwAAABoAAQARAAAAAQABAAEAAABQAQAABAAAAHAQAgAAAA4ABgAOAAgA" +
+    "DgAGPGluaXQ+AAdHb29kYnllAAFMABhMYXJ0L1Rlc3QxOTU4JFRyYW5zZm9ybTsADkxhcnQvVGVz" +
+    "dDE5NTg7ACJMZGFsdmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90" +
+    "YXRpb24vSW5uZXJDbGFzczsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7" +
+    "AA1UZXN0MTk1OC5qYXZhAAlUcmFuc2Zvcm0AAVYAC2FjY2Vzc0ZsYWdzAARuYW1lAAVzYXlIaQAF" +
+    "dmFsdWUAdX5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0x" +
+    "IjoiNTFjYWNlMWFiZGQwOGIzMjBmMjVmYjgxMTZjMjQzMmIwMmYwOTI5NSIsInZlcnNpb24iOiIx" +
+    "LjQuNS1kZXYifQACAgEPGAECAwIMBAgNFwoAAAIAAICABLgCAQmgAgAAAAACAAAAtwIAAL0CAADY" +
+    "AgAAAAAAAAAAAAAAAAAADgAAAAAAAAABAAAAAAAAAAEAAAARAAAAcAAAAAIAAAAHAAAAtAAAAAMA" +
+    "AAACAAAA0AAAAAUAAAADAAAA6AAAAAYAAAABAAAAAAEAAAEgAAACAAAAIAEAAAMgAAACAAAAUAEA" +
+    "AAIgAAARAAAAWAEAAAQgAAACAAAAtwIAAAAgAAABAAAAxgIAAAMQAAACAAAA1AIAAAYgAAABAAAA" +
+    "5AIAAAAQAAABAAAA9AIAAA==");
+
+  // Warms up the helper so the jit is likely to inline Transform.sayHi, then
+  // redefines Transform and checks the new body is observed anyway.
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    Method doSayHi = Runner.class.getDeclaredMethod("doSayHi");
+    Method dontSayHi = Runner.class.getDeclaredMethod("dontSayHi");
+    // Run the method enough times that the jit thinks it's interesting (default 10000).
+    for (int i = 0; i < 20000; i++) {
+      doSayHi.invoke(null);
+      dontSayHi.invoke(null);
+    }
+    // Sleep for 10 seconds to let the jit finish any work it's doing.
+    Thread.sleep(10 * 1000);
+    // Check what we get right now.
+    System.out.println("Before redefinition: " + doSayHi.invoke(null));
+    Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    System.out.println("After redefinition: " + doSayHi.invoke(null));
+  }
+}
diff --git a/test/1959-redefine-object-instrument/expected.txt b/test/1959-redefine-object-instrument/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/1959-redefine-object-instrument/expected.txt
diff --git a/test/1959-redefine-object-instrument/fake_redef_object.cc b/test/1959-redefine-object-instrument/fake_redef_object.cc
new file mode 100644
index 0000000..b1201ab
--- /dev/null
+++ b/test/1959-redefine-object-instrument/fake_redef_object.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits>
+#include <memory>
+
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+// Slicer's headers have code that triggers these warnings. b/65298177
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wsign-compare"
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#include "slicer/instrumentation.h"
+#include "slicer/reader.h"
+#include "slicer/writer.h"
+#pragma clang diagnostic pop
+
+namespace art {
+namespace Test1959RedefineObjectInstrument {
+
+// Just pull it out of the dex file but don't bother changing anything.
+// ClassFileLoadHook callback: round-trips java/lang/Object's class data
+// through slicer (read -> IR -> write) without modifying it, producing a
+// jvmti-allocated copy in *new_class_data. All other classes are ignored.
+static void JNICALL RedefineObjectHook(jvmtiEnv *jvmti_env,
+                                       JNIEnv* env,
+                                       jclass class_being_redefined ATTRIBUTE_UNUSED,
+                                       jobject loader ATTRIBUTE_UNUSED,
+                                       const char* name,
+                                       jobject protection_domain ATTRIBUTE_UNUSED,
+                                       jint class_data_len,
+                                       const unsigned char* class_data,
+                                       jint* new_class_data_len,
+                                       unsigned char** new_class_data) {
+  if (strcmp(name, "java/lang/Object") != 0) {
+    return;
+  }
+
+  dex::Reader reader(class_data, class_data_len);
+  dex::u4 class_index = reader.FindClassIndex("Ljava/lang/Object;");
+  if (class_index == dex::kNoIndex) {
+    env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
+                  "Failed to find object in dex file!");
+    return;
+  }
+
+  reader.CreateClassIr(class_index);
+  auto dex_ir = reader.GetIr();
+  dex::Writer writer(dex_ir);
+
+  // Adapter so slicer's writer allocates through jvmti; the runtime then owns
+  // (and frees) the resulting class-data buffer.
+  class JvmtiAllocator : public dex::Writer::Allocator {
+   public:
+    explicit JvmtiAllocator(jvmtiEnv* jvmti) : jvmti_(jvmti) {}
+
+    void* Allocate(size_t size) override {
+      unsigned char* res = nullptr;
+      jvmti_->Allocate(size, &res);
+      return res;
+    }
+
+    void Free(void* ptr) override {
+      jvmti_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
+    }
+
+   private:
+    jvmtiEnv* jvmti_;
+  };
+  JvmtiAllocator allocator(jvmti_env);
+  size_t new_size;
+  *new_class_data = writer.CreateImage(&allocator, &new_size);
+  if (new_size > std::numeric_limits<jint>::max()) {
+    // NOTE(review): the oversized image is not freed via allocator.Free here,
+    // so this error path leaks the buffer — confirm whether that is acceptable
+    // for this test-only code.
+    *new_class_data = nullptr;
+    env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
+                  "transform result is too large!");
+    return;
+  }
+  *new_class_data_len = static_cast<jint>(new_size);
+}
+
+// Forces a no-op retransformation of java/lang/Object on the given thread:
+// enables the ClassFileLoadHook (RedefineObjectHook), retransforms, then
+// disables the hook again. Each jvmti failure is turned into a pending Java
+// exception and aborts the sequence. No-op on the reference implementation.
+extern "C" JNIEXPORT void JNICALL Java_Main_forceRedefine(JNIEnv* env,
+                                                          jclass klass ATTRIBUTE_UNUSED,
+                                                          jclass obj_class,
+                                                          jthread thr) {
+  if (IsJVM()) {
+    // RI so don't do anything.
+    return;
+  }
+  jvmtiCapabilities caps {.can_retransform_classes = 1};
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
+    return;
+  }
+  jvmtiEventCallbacks cb {.ClassFileLoadHook = RedefineObjectHook };
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->RetransformClasses(1, &obj_class))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+                                                                thr))) {
+    return;
+  }
+}
+
+}  // namespace Test1959RedefineObjectInstrument
+}  // namespace art
+
diff --git a/test/1959-redefine-object-instrument/info.txt b/test/1959-redefine-object-instrument/info.txt
new file mode 100644
index 0000000..d15c0e0
--- /dev/null
+++ b/test/1959-redefine-object-instrument/info.txt
@@ -0,0 +1,9 @@
+Regression test for bug related to interaction between instrumentation
+installation and class redefinition.
+
+Redefining a class does not update the instrumentation stack of a thread.
+This is generally fine because the method pointer in the instrumentation
+stack is only used for some sanity checks, logging, and method-exit events
+(where having the non-obsolete version is advantageous). Unfortunately, some
+of the checks fail to account for obsolete methods and can therefore fail
+these sanity checks.
\ No newline at end of file
diff --git a/test/1959-redefine-object-instrument/run b/test/1959-redefine-object-instrument/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1959-redefine-object-instrument/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1959-redefine-object-instrument/src/Main.java b/test/1959-redefine-object-instrument/src/Main.java
new file mode 100644
index 0000000..b3201f6
--- /dev/null
+++ b/test/1959-redefine-object-instrument/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.LinkedList;
+import java.lang.reflect.Executable;
+
+import art.*;
+
+public class Main {
+  /**
+   * NB This test cannot be run on the RI.
+   * TODO We should make this run on the RI.
+   */
+
+  public static void main(String[] args) throws Exception {
+    doTest();
+  }
+
+  public static volatile boolean started = false;
+
+  // Target method for the breakpoint; intentionally empty.
+  public static void doNothing() {}
+  // Breakpoint callback; intentionally does nothing.
+  public static void notifyBreakpointReached(Thread thr, Executable e, long l) {}
+
+  // Sets a breakpoint, parks a second thread in a timed wait so it has an
+  // instrumentation stack, redefines java.lang.Object while the thread is
+  // paused, then re-installs the breakpoint and wakes the thread.
+  public static void doTest() throws Exception {
+    final Object lock = new Object();
+    Breakpoint.Manager man = new Breakpoint.Manager();
+    Breakpoint.startBreakpointWatch(
+        Main.class,
+        Main.class.getDeclaredMethod("notifyBreakpointReached", Thread.class, Executable.class, Long.TYPE),
+        null);
+    Thread thr = new Thread(() -> {
+      synchronized (lock) {
+        started = true;
+        // Wait basically forever.
+        try {
+          lock.wait(Integer.MAX_VALUE - 1);
+        } catch (Exception e) {
+          throw new Error("WAIT EXCEPTION", e);
+        }
+      }
+    });
+    // set the breakpoint.
+    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0l);
+    thr.start();
+    // Deliberate spin-wait: block until the thread has entered its timed wait.
+    while (!started || thr.getState() != Thread.State.TIMED_WAITING);
+    // Redefine while thread is paused.
+    forceRedefine(Object.class, Thread.currentThread());
+    // Clear breakpoints.
+    man.clearAllBreakpoints();
+    // set the breakpoint again.
+    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0l);
+    // Wakeup
+    synchronized(lock) {
+      lock.notifyAll();
+    }
+    thr.join();
+  }
+
+  private static native void forceRedefine(Class c, Thread thr);
+}
diff --git a/test/1959-redefine-object-instrument/src/art/Breakpoint.java b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
diff --git a/test/202-thread-oome/src/Main.java b/test/202-thread-oome/src/Main.java
index f7df93b..b5c0ce6 100644
--- a/test/202-thread-oome/src/Main.java
+++ b/test/202-thread-oome/src/Main.java
@@ -21,7 +21,7 @@
       t.start();
     } catch (OutOfMemoryError expected) {
       // TODO: fix bionic bug https://b/6702535 so we can check the full detail message.
-      if (!expected.getMessage().startsWith("pthread_create (3GB stack) failed: ")) {
+      if (!expected.getMessage().startsWith("pthread_create (3073MB stack) failed: ")) {
         throw new AssertionError(expected);
       }
     }
diff --git a/test/411-checker-hdiv-hrem-pow2/src/DivTest.java b/test/411-checker-hdiv-hrem-pow2/src/DivTest.java
index a3882e7..1a086ef 100644
--- a/test/411-checker-hdiv-hrem-pow2/src/DivTest.java
+++ b/test/411-checker-hdiv-hrem-pow2/src/DivTest.java
@@ -94,6 +94,9 @@
   /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivBy2(int) disassembly (after)
   /// CHECK:                 add w{{\d+}}, w{{\d+}}, w{{\d+}}, lsr #31
   /// CHECK:                 asr w{{\d+}}, w{{\d+}}, #1
+  /// CHECK-START-X86_64: java.lang.Integer DivTest.$noinline$IntDivBy2(int) disassembly (after)
+  /// CHECK-NOT:             cmovnl/geq
+  /// CHECK:                 add
   private static Integer $noinline$IntDivBy2(int v) {
     int r = v / 2;
     return r;
@@ -102,6 +105,9 @@
   /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivByMinus2(int) disassembly (after)
   /// CHECK:                 add w{{\d+}}, w{{\d+}}, w{{\d+}}, lsr #31
   /// CHECK:                 neg w{{\d+}}, w{{\d+}}, asr #1
+  /// CHECK-START-X86_64: java.lang.Integer DivTest.$noinline$IntDivByMinus2(int) disassembly (after)
+  /// CHECK-NOT:             cmovnl/geq
+  /// CHECK:                 add
   private static Integer $noinline$IntDivByMinus2(int v) {
     int r = v / -2;
     return r;
@@ -205,6 +211,9 @@
   /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivBy2(long) disassembly (after)
   /// CHECK:                 add x{{\d+}}, x{{\d+}}, x{{\d+}}, lsr #63
   /// CHECK:                 asr x{{\d+}}, x{{\d+}}, #1
+  /// CHECK-START-X86_64: java.lang.Long DivTest.$noinline$LongDivBy2(long) disassembly (after)
+  /// CHECK-NOT:             cmovnl/geq
+  /// CHECK:                 addq
   private static Long $noinline$LongDivBy2(long v) {
     long r = v / 2;
     return r;
@@ -213,6 +222,9 @@
   /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivByMinus2(long) disassembly (after)
   /// CHECK:                 add x{{\d+}}, x{{\d+}}, x{{\d+}}, lsr #63
   /// CHECK:                 neg x{{\d+}}, x{{\d+}}, asr #1
+  /// CHECK-START-X86_64: java.lang.Long DivTest.$noinline$LongDivByMinus2(long) disassembly (after)
+  /// CHECK-NOT:             cmovnl/geq
+  /// CHECK:                 addq
   private static Long $noinline$LongDivByMinus2(long v) {
     long r = v / -2;
     return r;
diff --git a/test/411-checker-hdiv-hrem-pow2/src/RemTest.java b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
index 72725c1..54d7847f 100644
--- a/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
+++ b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
@@ -92,6 +92,17 @@
   /// CHECK:                 cmp w{{\d+}}, #0x0
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0x1
   /// CHECK:                 cneg w{{\d+}}, w{{\d+}}, lt
+  /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntMod2(int) disassembly (after)
+  /// CHECK:          Rem [{{i\d+}},{{i\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shr
+  /// CHECK-NOT:      imul
+  /// CHECK:          mov
+  /// CHECK:          and
+  /// CHECK:          jz/eq
+  /// CHECK:          lea
+  /// CHECK:          test
+  /// CHECK:          cmovl/nge
   private static Integer $noinline$IntMod2(int v) {
     int r = v % 2;
     return r;
@@ -101,6 +112,17 @@
   /// CHECK:                 cmp w{{\d+}}, #0x0
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0x1
   /// CHECK:                 cneg w{{\d+}}, w{{\d+}}, lt
+  /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModMinus2(int) disassembly (after)
+  /// CHECK:          Rem [{{i\d+}},{{i\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shr
+  /// CHECK-NOT:      imul
+  /// CHECK:          mov
+  /// CHECK:          and
+  /// CHECK:          jz/eq
+  /// CHECK:          lea
+  /// CHECK:          test
+  /// CHECK:          cmovl/nge
   private static Integer $noinline$IntModMinus2(int v) {
     int r = v % -2;
     return r;
@@ -111,6 +133,17 @@
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0xf
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0xf
   /// CHECK:                 csneg w{{\d+}}, w{{\d+}}, mi
+  /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntMod16(int) disassembly (after)
+  /// CHECK:          Rem [{{i\d+}},{{i\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shr
+  /// CHECK-NOT:      imul
+  /// CHECK:          mov
+  /// CHECK:          and
+  /// CHECK:          jz/eq
+  /// CHECK:          lea
+  /// CHECK:          test
+  /// CHECK:          cmovl/nge
   private static Integer $noinline$IntMod16(int v) {
     int r = v % 16;
     return r;
@@ -121,6 +154,17 @@
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0xf
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0xf
   /// CHECK:                 csneg w{{\d+}}, w{{\d+}}, mi
+  /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModMinus16(int) disassembly (after)
+  /// CHECK:          Rem [{{i\d+}},{{i\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shr
+  /// CHECK-NOT:      imul
+  /// CHECK:          mov
+  /// CHECK:          and
+  /// CHECK:          jz/eq
+  /// CHECK:          lea
+  /// CHECK:          test
+  /// CHECK:          cmovl/nge
   private static Integer $noinline$IntModMinus16(int v) {
     int r = v % -16;
     return r;
@@ -131,6 +175,17 @@
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0x7fffffff
   /// CHECK:                 and w{{\d+}}, w{{\d+}}, #0x7fffffff
   /// CHECK:                 csneg w{{\d+}}, w{{\d+}}, mi
+  /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModIntMin(int) disassembly (after)
+  /// CHECK:          Rem [{{i\d+}},{{i\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shr
+  /// CHECK-NOT:      imul
+  /// CHECK:          mov
+  /// CHECK:          and
+  /// CHECK:          jz/eq
+  /// CHECK:          lea
+  /// CHECK:          test
+  /// CHECK:          cmovl/nge
   private static Integer $noinline$IntModIntMin(int v) {
     int r = v % Integer.MIN_VALUE;
     return r;
@@ -211,6 +266,18 @@
   /// CHECK:                 cmp x{{\d+}}, #0x0
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0x1
   /// CHECK:                 cneg x{{\d+}}, x{{\d+}}, lt
+  /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongMod2(long) disassembly (after)
+  /// CHECK:          Rem [{{j\d+}},{{j\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shrq
+  /// CHECK-NOT:      imulq
+  /// CHECK:          movq
+  /// CHECK:          andq
+  /// CHECK:          jz/eq
+  /// CHECK:          movq
+  /// CHECK:          sarq
+  /// CHECK:          shlq
+  /// CHECK:          orq
   private static Long $noinline$LongMod2(long v) {
     long r = v % 2;
     return r;
@@ -220,6 +287,18 @@
   /// CHECK:                 cmp x{{\d+}}, #0x0
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0x1
   /// CHECK:                 cneg x{{\d+}}, x{{\d+}}, lt
+  /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModMinus2(long) disassembly (after)
+  /// CHECK:          Rem [{{j\d+}},{{j\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shrq
+  /// CHECK-NOT:      imulq
+  /// CHECK:          movq
+  /// CHECK:          andq
+  /// CHECK:          jz/eq
+  /// CHECK:          movq
+  /// CHECK:          sarq
+  /// CHECK:          shlq
+  /// CHECK:          orq
   private static Long $noinline$LongModMinus2(long v) {
     long r = v % -2;
     return r;
@@ -230,6 +309,19 @@
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0xf
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0xf
   /// CHECK:                 csneg x{{\d+}}, x{{\d+}}, mi
+
+  /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongMod16(long) disassembly (after)
+  /// CHECK:          Rem [{{j\d+}},{{j\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shrq
+  /// CHECK-NOT:      imulq
+  /// CHECK:          movq
+  /// CHECK:          andq
+  /// CHECK:          jz/eq
+  /// CHECK:          movq
+  /// CHECK:          sarq
+  /// CHECK:          shlq
+  /// CHECK:          orq
   private static Long $noinline$LongMod16(long v) {
     long r = v % 16;
     return r;
@@ -240,6 +332,18 @@
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0xf
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0xf
   /// CHECK:                 csneg x{{\d+}}, x{{\d+}}, mi
+  /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModMinus16(long) disassembly (after)
+  /// CHECK:          Rem [{{j\d+}},{{j\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shrq
+  /// CHECK-NOT:      imulq
+  /// CHECK:          movq
+  /// CHECK:          andq
+  /// CHECK:          jz/eq
+  /// CHECK:          movq
+  /// CHECK:          sarq
+  /// CHECK:          shlq
+  /// CHECK:          orq
   private static Long $noinline$LongModMinus16(long v) {
     long r = v % -16;
     return r;
@@ -250,6 +354,18 @@
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
   /// CHECK:                 and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
   /// CHECK:                 csneg x{{\d+}}, x{{\d+}}, mi
+  /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModLongMin(long) disassembly (after)
+  /// CHECK:          Rem [{{j\d+}},{{j\d+}}]
+  /// CHECK-NOT:      imul
+  /// CHECK-NOT:      shrq
+  /// CHECK-NOT:      imulq
+  /// CHECK:          movq
+  /// CHECK:          andq
+  /// CHECK:          jz/eq
+  /// CHECK:          movq
+  /// CHECK:          sarq
+  /// CHECK:          shlq
+  /// CHECK:          orq
   private static Long $noinline$LongModLongMin(long v) {
     long r = v % Long.MIN_VALUE;
     return r;
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 3ccfce4..6c75962 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -135,11 +135,7 @@
     }
   }
 
-  /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before)
-  /// CHECK-DAG:     <<Result:i\d+>>      InvokeStaticOrDirect
-  /// CHECK-DAG:                          Return [<<Result>>]
-
-  /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.returnAbs(int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>      InvokeStaticOrDirect intrinsic:MathAbsInt
   /// CHECK-DAG:                          Return [<<Result>>]
 
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 5fc5464..eb81f3b 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -34,7 +34,7 @@
         this_value_(this_value),
         found_method_index_(0) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index f867bdf..80abb3b 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -32,7 +32,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 7eb3fe5..817a647 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -25,62 +25,50 @@
 
 namespace {
 
-class TestVisitor : public StackVisitor {
- public:
-  TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        this_value_(this_value),
-        found_method_index_(0) {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    std::string m_name(m->GetName());
-
-    if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
-      found_method_index_ = 1;
-      uint32_t value = 0;
-      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
-      CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
-      CHECK_EQ(GetThisObject(), this_value_);
-    } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
-      found_method_index_ = 2;
-      uint32_t value = 0;
-      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
-    } else if (m_name.compare("$noinline$testParameter") == 0) {
-      found_method_index_ = 3;
-      uint32_t value = 0;
-      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
-    } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
-      found_method_index_ = 4;
-      uint32_t value = 0;
-      CHECK(GetVReg(m, 0, kReferenceVReg, &value));
-    }
-
-    return true;
-  }
-
-  mirror::Object* this_value_;
-
-  // Value returned to Java to ensure the methods testSimpleVReg and testPairVReg
-  // have been found and tested.
-  jint found_method_index_;
-};
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
+jint FindMethodIndex(jobject this_value_jobj) {
   ScopedObjectAccess soa(Thread::Current());
   std::unique_ptr<Context> context(Context::Create());
-  TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object>(value).Ptr());
-  visitor.WalkStack();
-  return visitor.found_method_index_;
+  ObjPtr<mirror::Object> this_value = soa.Decode<mirror::Object>(this_value_jobj);
+  jint found_method_index = 0;
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        std::string m_name(m->GetName());
+
+        if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
+          found_method_index = 1;
+          uint32_t value = 0;
+          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+          CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value);
+          CHECK_EQ(stack_visitor->GetThisObject(), this_value);
+        } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
+          found_method_index = 2;
+          uint32_t value = 0;
+          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+        } else if (m_name.compare("$noinline$testParameter") == 0) {
+          found_method_index = 3;
+          uint32_t value = 0;
+          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+        } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
+          found_method_index = 4;
+          uint32_t value = 0;
+          CHECK(stack_visitor->GetVReg(m, 0, kReferenceVReg, &value));
+        }
+
+        return true;
+      },
+      soa.Self(),
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return found_method_index;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
+  return FindMethodIndex(value);
 }
 
 extern "C" JNIEXPORT jint JNICALL Java_Main_doStaticNativeCallRef(JNIEnv*, jclass) {
-  ScopedObjectAccess soa(Thread::Current());
-  std::unique_ptr<Context> context(Context::Create());
-  TestVisitor visitor(soa.Self(), context.get(), nullptr);
-  visitor.WalkStack();
-  return visitor.found_method_index_;
+  return FindMethodIndex(nullptr);
 }
 
 }  // namespace
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 58ffe04..905d8e6 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -32,7 +32,7 @@
   TestVisitor(Thread* thread, Context* context) REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index e16fa69..b1bc51e 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -26,7 +26,7 @@
   /// CHECK-START: void Main.invokeStaticInlined() builder (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
   /// CHECK-DAG:     <<ClinitCheck:l\d+>>  ClinitCheck [<<LoadClass>>]
-  /// CHECK-DAG:                           InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+  /// CHECK-DAG:                           InvokeStaticOrDirect [{{([ij]\d+,)?}}<<ClinitCheck>>]
 
   /// CHECK-START: void Main.invokeStaticInlined() inliner (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
@@ -69,7 +69,7 @@
   /// CHECK-START: void Main.invokeStaticNotInlined() builder (after)
   /// CHECK:         <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
   /// CHECK:         <<ClinitCheck:l\d+>>  ClinitCheck [<<LoadClass>>]
-  /// CHECK:                               InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+  /// CHECK:                               InvokeStaticOrDirect [{{([ij]\d+,)?}}<<ClinitCheck>>]
 
   /// CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
   /// CHECK:         <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index f83c924..f39b5e2 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -572,6 +572,75 @@
     buf1[end] = 'n';
   }
 
+  //
+  // Check that IntermediateAddress can be shared for object ArrayGets.
+  //
+  /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Parameter:l\d+>>     ParameterValue
+  /// CHECK: <<Array:l\d+>>         NullCheck [<<Parameter>>]
+  /// CHECK:                        ArrayGet [<<Array>>,{{i\d+}}]
+  /// CHECK:                        ArrayGet [<<Array>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK:                        ArrayGet [<<Array>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+
+  /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) instruction_simplifier_arm64 (after)
+  /// CHECK: <<Parameter:l\d+>>     ParameterValue
+  /// CHECK: <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK: <<Array:l\d+>>         NullCheck [<<Parameter>>]
+  /// CHECK: <<IntAddr1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                        ArrayGet [<<IntAddr1>>,{{i\d+}}]
+  /// CHECK: <<IntAddr2:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                        ArrayGet [<<IntAddr2>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK: <<IntAddr3:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                        ArrayGet [<<IntAddr3>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  //
+  /// CHECK-NOT:                    IntermediateAddress
+
+  /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) GVN$after_arch (after)
+  /// CHECK: <<Parameter:l\d+>>     ParameterValue
+  /// CHECK: <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK: <<Array:l\d+>>         NullCheck [<<Parameter>>]
+  /// CHECK: <<IntAddr1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                        ArrayGet [<<IntAddr1>>,{{i\d+}}]
+  /// CHECK:                        ArrayGet [<<IntAddr1>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK: <<IntAddr3:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                        ArrayGet [<<IntAddr3>>,{{i\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  /// CHECK:                        ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+  //
+  /// CHECK-NOT:                    IntermediateAddress
+  public final static int checkObjectArrayGet(int index, Integer[] a, Integer[] b) {
+    Integer five = Integer.valueOf(5);
+    int tmp1 = a[index];
+    tmp1 += a[index + 1];
+    a[index + 1] = five;
+    tmp1 += a[index + 2];
+    a[index + 2] = five;
+    a[index + 3] = five;
+    return tmp1;
+  }
+
+  /// CHECK-START-ARM64: int Main.testIntAddressObjDisasm(java.lang.Integer[], int) disassembly (after)
+  /// CHECK: <<IntAddr:i\d+>>       IntermediateAddress
+  /// CHECK:                          add w<<AddrReg:\d+>>, {{w\d+}}, #0xc
+  /// CHECK:                        ArrayGet [<<IntAddr>>,{{i\d+}}]
+  /// CHECK:                          ldr {{w\d+}}, [x<<AddrReg>>, x{{\d+}}, lsl #2]
+  /// CHECK:                        ArrayGet [<<IntAddr>>,{{i\d+}}]
+  /// CHECK:                          ldr {{w\d+}}, [x<<AddrReg>>, x{{\d+}}, lsl #2]
+
+  /// CHECK-START-ARM64: int Main.testIntAddressObjDisasm(java.lang.Integer[], int) disassembly (after)
+  /// CHECK:                          add {{w\d+}}, {{w\d+}}, #0xc
+  /// CHECK-NOT:                      add {{w\d+}}, {{w\d+}}, #0xc
+  private int testIntAddressObjDisasm(Integer[] obj, int x) {
+    return obj[x] + obj[x + 1];
+  }
+
   public final static int ARRAY_SIZE = 128;
 
   public static void main(String[] args) {
diff --git a/test/530-checker-peel-unroll/smali/PeelUnroll.smali b/test/530-checker-peel-unroll/smali/PeelUnroll.smali
new file mode 100644
index 0000000..6e09e92
--- /dev/null
+++ b/test/530-checker-peel-unroll/smali/PeelUnroll.smali
@@ -0,0 +1,232 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LPeelUnroll;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: void PeelUnroll.unrollingWhile(int[]) loop_optimization (before)
+## CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
+## CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
+## CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
+## CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
+## CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
+## CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
+
+## CHECK-NOT:                   ArrayGet                                  loop:<<Loop>>      outer_loop:none
+## CHECK-NOT:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
+
+## CHECK-START: void PeelUnroll.unrollingWhile(int[]) loop_optimization (after)
+## CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
+## CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
+## CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
+## CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
+## CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
+## CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>]     loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<PhiSM:i\d+>>    Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
+
+## CHECK-DAG: <<AddIA:i\d+>>    Add [<<AddI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<CheckA:z\d+>>   GreaterThanOrEqual [<<AddI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<IfA:v\d+>>      If [<<Const0>>]                           loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<RemA:i\d+>>     Rem [<<AddIA>>,<<Const2>>]                loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NEA:z\d+>>      NotEqual [<<RemA>>,<<Const0>>]            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NEA>>]                              loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddSA:i\d+>>    Add [<<PhiSM>>,<<Const1>>]                loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>]    loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   Phi [<<AddSA>>,<<PhiSM>>]                 loop:<<Loop>>      outer_loop:none
+
+## CHECK-NOT:                   ArrayGet                                  loop:<<Loop>>      outer_loop:none
+## CHECK-NOT:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
+.method public static final unrollingWhile([I)V
+    .registers 5
+    .param p0, "a"    # [I
+
+    .line 167
+    const/4 v0, 0x0
+
+    .line 168
+    .local v0, "i":I
+    const/16 v1, 0x80
+
+    .line 169
+    .local v1, "s":I
+    :goto_3
+    add-int/lit8 v2, v0, 0x1
+
+    .end local v0    # "i":I
+    .local v2, "i":I
+    const/16 v3, 0xffe
+
+    if-ge v0, v3, :cond_14
+
+    .line 170
+    rem-int/lit8 v0, v2, 0x2
+
+    if-nez v0, :cond_12
+
+    .line 171
+    add-int/lit8 v0, v1, 0x1
+
+    .end local v1    # "s":I
+    .local v0, "s":I
+    aput v1, p0, v2
+
+    .line 169
+    move v1, v0
+
+    .end local v2    # "i":I
+    .local v0, "i":I
+    .restart local v1    # "s":I
+    :cond_12
+    move v0, v2
+
+    goto :goto_3
+
+    .line 174
+    .end local v0    # "i":I
+    .restart local v2    # "i":I
+    :cond_14
+    return-void
+.end method
+
+
+## CHECK-START: int PeelUnroll.unrollingWhileLiveOuts(int[]) loop_optimization (before)
+## CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
+## CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
+## CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
+## CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
+## CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
+## CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
+
+## CHECK-NOT:                   ArrayGet
+## CHECK-NOT:                   ArraySet
+
+## CHECK-START: int PeelUnroll.unrollingWhileLiveOuts(int[]) loop_optimization (after)
+## CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
+## CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
+## CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
+## CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
+## CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
+## CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>]     loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<PhiSM:i\d+>>    Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
+
+## CHECK-DAG: <<AddIA:i\d+>>    Add [<<AddI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<CheckA:z\d+>>   GreaterThanOrEqual [<<AddI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<IfA:v\d+>>      If [<<Const0>>]                           loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<RemA:i\d+>>     Rem [<<AddIA>>,<<Const2>>]                loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<NEA:z\d+>>      NotEqual [<<RemA>>,<<Const0>>]            loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   If [<<NEA>>]                              loop:<<Loop>>      outer_loop:none
+## CHECK-DAG: <<AddSA:i\d+>>    Add [<<PhiSM>>,<<Const1>>]                loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>]    loop:<<Loop>>      outer_loop:none
+## CHECK-DAG:                   Phi [<<AddSA>>,<<PhiSM>>]                 loop:<<Loop>>      outer_loop:none
+
+## CHECK-DAG: <<RetPhi:i\d+>>   Phi [<<PhiS>>,<<PhiSM>>]                  loop:none
+## CHECK-DAG:                   Return [<<RetPhi>>]                       loop:none
+
+## CHECK-NOT:                   ArrayGet
+## CHECK-NOT:                   ArraySet
+.method public static final unrollingWhileLiveOuts([I)I
+    .registers 5
+    .param p0, "a"    # [I
+
+    .line 598
+    const/4 v0, 0x0
+
+    .line 599
+    .local v0, "i":I
+    const/16 v1, 0x80
+
+    .line 600
+    .local v1, "s":I
+    :goto_3
+    add-int/lit8 v2, v0, 0x1
+
+    .end local v0    # "i":I
+    .local v2, "i":I
+    const/16 v3, 0xffe
+
+    if-ge v0, v3, :cond_14
+
+    .line 601
+    rem-int/lit8 v0, v2, 0x2
+
+    if-nez v0, :cond_12
+
+    .line 602
+    add-int/lit8 v0, v1, 0x1
+
+    .end local v1    # "s":I
+    .local v0, "s":I
+    aput v1, p0, v2
+
+    .line 600
+    move v1, v0
+
+    .end local v2    # "i":I
+    .local v0, "i":I
+    .restart local v1    # "s":I
+    :cond_12
+    move v0, v2
+
+    goto :goto_3
+
+    .line 605
+    .end local v0    # "i":I
+    .restart local v2    # "i":I
+    :cond_14
+    return v1
+.end method
+
diff --git a/test/530-checker-peel-unroll/src/Main.java b/test/530-checker-peel-unroll/src/Main.java
index 4d81440..aee32b7 100644
--- a/test/530-checker-peel-unroll/src/Main.java
+++ b/test/530-checker-peel-unroll/src/Main.java
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+import java.lang.reflect.Method;
+
 //
 // Test loop optimizations, in particular scalar loop peeling and unrolling.
 public class Main {
@@ -110,69 +112,6 @@
     }
   }
 
-  /// CHECK-START: void Main.unrollingWhile(int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
-  /// CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
-  /// CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
-  /// CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
-  /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
-  /// CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
-  /// CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-NOT:                   ArrayGet                                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-NOT:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START: void Main.unrollingWhile(int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
-  /// CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
-  /// CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
-  /// CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
-  /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
-  /// CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
-  /// CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>]     loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<PhiSM:i\d+>>    Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-DAG: <<AddIA:i\d+>>    Add [<<AddI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<CheckA:z\d+>>   GreaterThanOrEqual [<<AddI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<IfA:v\d+>>      If [<<Const0>>]                           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<RemA:i\d+>>     Rem [<<AddIA>>,<<Const2>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NEA:z\d+>>      NotEqual [<<RemA>>,<<Const0>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NEA>>]                              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddSA:i\d+>>    Add [<<PhiSM>>,<<Const1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   Phi [<<AddSA>>,<<PhiSM>>]                 loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-NOT:                   ArrayGet                                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-NOT:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
-  private static final void unrollingWhile(int[] a) {
-    int i = 0;
-    int s = 128;
-    while (i++ < LENGTH - 2) {
-      if (i % 2 == 0) {
-        a[i] = s++;
-      }
-    }
-  }
-
   // Simple check that loop unrolling has happened.
   //
   /// CHECK-START: void Main.unrollingSwitch(int[]) loop_optimization (before)
@@ -538,73 +477,6 @@
     return 1 / (s + t);
   }
 
-  /// CHECK-START: int Main.unrollingWhileLiveOuts(int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
-  /// CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
-  /// CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
-  /// CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
-  /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
-  /// CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
-  /// CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet                                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-NOT:                   ArrayGet
-  /// CHECK-NOT:                   ArraySet
-
-  /// CHECK-START: int Main.unrollingWhileLiveOuts(int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Array:l\d+>>    ParameterValue                            loop:none
-  /// CHECK-DAG: <<Const0:i\d+>>   IntConstant 0                             loop:none
-  /// CHECK-DAG: <<Const1:i\d+>>   IntConstant 1                             loop:none
-  /// CHECK-DAG: <<Const2:i\d+>>   IntConstant 2                             loop:none
-  /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128                           loop:none
-  /// CHECK-DAG: <<Limit:i\d+>>    IntConstant 4094                          loop:none
-  /// CHECK-DAG: <<PhiI:i\d+>>     Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<PhiS:i\d+>>     Phi [<<Const128>>,{{i\d+}}]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddI:i\d+>>     Add [<<PhiI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Check:z\d+>>    GreaterThanOrEqual [<<PhiI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<If:v\d+>>       If [<<Check>>]                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Rem:i\d+>>      Rem [<<AddI>>,<<Const2>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NE:z\d+>>       NotEqual [<<Rem>>,<<Const0>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NE>>]                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddS:i\d+>>     Add [<<PhiS>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>]     loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<PhiSM:i\d+>>    Phi [<<PhiS>>,<<AddS>>]                   loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-DAG: <<AddIA:i\d+>>    Add [<<AddI>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<CheckA:z\d+>>   GreaterThanOrEqual [<<AddI>>,<<Limit>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<IfA:v\d+>>      If [<<Const0>>]                           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<RemA:i\d+>>     Rem [<<AddIA>>,<<Const2>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<NEA:z\d+>>      NotEqual [<<RemA>>,<<Const0>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   If [<<NEA>>]                              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<AddSA:i\d+>>    Add [<<PhiSM>>,<<Const1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                   Phi [<<AddSA>>,<<PhiSM>>]                 loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-DAG: <<RetPhi:i\d+>>   Phi [<<PhiS>>,<<PhiSM>>]                  loop:none
-  /// CHECK-DAG:                   Return [<<RetPhi>>]                       loop:none
-  //
-  /// CHECK-NOT:                   ArrayGet
-  /// CHECK-NOT:                   ArraySet
-  private static final int unrollingWhileLiveOuts(int[] a) {
-    int i = 0;
-    int s = 128;
-    while (i++ < LENGTH - 2) {
-      if (i % 2 == 0) {
-        a[i] = s++;
-      }
-    }
-    return s;
-  }
-
   /// CHECK-START: int Main.unrollingLiveOutsNested(int[]) loop_optimization (before)
   /// CHECK-DAG: <<Array:l\d+>>   ParameterValue                            loop:none
   /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
@@ -1113,7 +985,7 @@
     }
   }
 
-  public void verifyUnrolling() {
+  public void verifyUnrolling() throws Exception {
     initIntArray(a);
     initIntArray(b);
 
@@ -1132,7 +1004,12 @@
     unrollingTypeConversion(a, doubleArray);
     unrollingCheckCast(a, new SubMain());
 
-    unrollingWhile(a);
+    // Call unrollingWhile(a);
+    Class<?> c = Class.forName("PeelUnroll");
+    Method m = c.getMethod("unrollingWhile", Class.forName("[I"));
+    Object[] arguments = { a };
+    m.invoke(null, arguments);
+
     unrollingLoadStoreElimination(a);
     unrollingSwitch(a);
     unrollingSwapElements(a);
@@ -1152,7 +1029,7 @@
     expectEquals(expected, found);
   }
 
-  public void verifyPeeling() {
+  public void verifyPeeling() throws Exception {
     expectEquals(1, peelingHoistOneControl(0));  // anything else loops
     expectEquals(1, peelingHoistOneControl(0, 0));
     expectEquals(1, peelingHoistOneControl(0, 1));
@@ -1176,7 +1053,13 @@
     peelingBreakFromNest(a, true);
 
     unrollingSimpleLiveOuts(a);
-    unrollingWhileLiveOuts(a);
+
+    // Call unrollingWhileLiveOuts(a);
+    Class<?> c = Class.forName("PeelUnroll");
+    Method m = c.getMethod("unrollingWhileLiveOuts", Class.forName("[I"));
+    Object[] arguments = { a };
+    m.invoke(null, arguments);
+
     unrollingLiveOutsNested(a);
 
     int expected = 51565978;
@@ -1188,7 +1071,7 @@
     expectEquals(expected, found);
   }
 
-  public static void main(String[] args) {
+  public static void main(String[] args) throws Exception {
     Main obj = new Main();
 
     obj.verifyUnrolling();
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index ce5602f..1885f8d 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -23,44 +23,28 @@
 
 namespace art {
 
-namespace {
-
-class TestVisitor : public StackVisitor {
- public:
-  TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        expected_value_(expected_value),
-        found_(false),
-        soa_(soa) {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    std::string m_name(m->GetName());
-
-    if (m_name == "testCase") {
-      found_ = true;
-      uint32_t value = 0;
-      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
-      CHECK_EQ(reinterpret_cast<mirror::Object*>(value),
-               soa_.Decode<mirror::Object>(expected_value_).Ptr());
-    }
-    return true;
-  }
-
-  jobject expected_value_;
-  bool found_;
-  const ScopedObjectAccess& soa_;
-};
-
-}  // namespace
-
 extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, jobject value) {
   ScopedObjectAccess soa(Thread::Current());
   std::unique_ptr<Context> context(Context::Create());
-  TestVisitor visitor(soa, context.get(), value);
-  visitor.WalkStack();
-  CHECK(visitor.found_);
+  bool found = false;
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        std::string m_name(m->GetName());
+
+        if (m_name == "testCase") {
+          found = true;
+          uint32_t stack_value = 0;
+          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &stack_value));
+          CHECK_EQ(reinterpret_cast<mirror::Object*>(stack_value),
+                   soa.Decode<mirror::Object>(value).Ptr());
+        }
+        return true;
+      },
+      soa.Self(),
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  CHECK(found);
 }
 
 }  // namespace art
diff --git a/test/551-checker-clinit/src/Main.java b/test/551-checker-clinit/src/Main.java
index ab92cd0..0eea800 100644
--- a/test/551-checker-clinit/src/Main.java
+++ b/test/551-checker-clinit/src/Main.java
@@ -57,7 +57,7 @@
   }
 
   /// CHECK-START: void Sub.invokeSubClass() builder (after)
-  /// CHECK:                            ClinitCheck
+  /// CHECK:                        ClinitCheck
   public void invokeSubClass() {
     int a = SubSub.foo;
   }
@@ -71,3 +71,23 @@
   }
   public static int foo = 42;
 }
+
+class NonTrivial {
+  public static int staticFoo = 42;
+  public int instanceFoo;
+
+  static {
+    System.out.println("NonTrivial.<clinit>");
+  }
+
+  /// CHECK-START: void NonTrivial.<init>() builder (after)
+  /// CHECK-NOT:                    ClinitCheck
+
+  /// CHECK-START: void NonTrivial.<init>() builder (after)
+  /// CHECK:                        StaticFieldGet
+  public NonTrivial() {
+    // ClinitCheck is eliminated because this is a constructor and therefore the
+    // corresponding new-instance in the caller must have performed the check.
+    instanceFoo = staticFoo;
+  }
+}
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 746887f..0bceffd 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -41,10 +41,7 @@
     return x;
   }
 
-  /// CHECK-START: int Main.testSimple(int) sharpening (before)
-  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
-
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testSimple(int) sharpening (after)
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testSimple(int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   /// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (before)
@@ -59,11 +56,7 @@
     return $noinline$foo(x);
   }
 
-  /// CHECK-START: int Main.testDiamond(boolean, int) sharpening (before)
-  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
-  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
-
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testDiamond(boolean, int) sharpening (after)
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testDiamond(boolean, int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
@@ -194,18 +187,12 @@
   }
 
   /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) builder (after)
-  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
-
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) sharpening (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BootImageRelRo
   public static String $noinline$toHexString(int value) {
     return Integer.toString(value, 16);
   }
 
   /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) builder (after)
-  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
-
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) sharpening (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   /// CHECK-START-X86: java.lang.String Main.$noinline$toHexStringIndirect(int) pc_relative_fixups_x86 (before)
diff --git a/test/552-checker-x86-avx2-bit-manipulation/expected.txt b/test/552-checker-x86-avx2-bit-manipulation/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/552-checker-x86-avx2-bit-manipulation/expected.txt
diff --git a/test/552-checker-x86-avx2-bit-manipulation/info.txt b/test/552-checker-x86-avx2-bit-manipulation/info.txt
new file mode 100644
index 0000000..37bc6dd
--- /dev/null
+++ b/test/552-checker-x86-avx2-bit-manipulation/info.txt
@@ -0,0 +1 @@
+Tests for generating bit manipulation instructions on x86
diff --git a/test/552-checker-x86-avx2-bit-manipulation/src/Main.java b/test/552-checker-x86-avx2-bit-manipulation/src/Main.java
new file mode 100644
index 0000000..b8138dd
--- /dev/null
+++ b/test/552-checker-x86-avx2-bit-manipulation/src/Main.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertLongEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /// CHECK-START-X86_64: long Main.and_not_64(long, long) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Phi     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:    Not     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Not     loop:none
+  /// CHECK-DAG:    And     loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.and_not_64(long, long) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86AndNot loop:<<Loop:B\d+>> outer_loop:none
+  // CHECK-DAG:      X86AndNot loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.and_not_64(long, long) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      Not       loop:<<Loop>>      outer_loop:none
+  // CHECK-NOT:      And       loop:<<Loop>>      outer_loop:none
+  // CHECK-NOT:      Not       loop:none
+  // CHECK-NOT:      And       loop:none
+  public static long and_not_64( long x, long y) {
+    long j = 1;
+    long k = 2;
+    for (long i = -64 ; i < 64; i++ ) {
+      x = x & ~i;
+      y = y | i;
+    }
+    return x & ~y;
+  }
+
+  /// CHECK-START-X86_64: int Main.and_not_32(int, int) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Phi     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:    Not     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Not     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Not     loop:none
+  /// CHECK-DAG:    And     loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.and_not_32(int, int) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86AndNot loop:<<Loop:B\d+>> outer_loop:none
+  // CHECK-DAG:      X86AndNot loop:<<Loop>>      outer_loop:none
+  // CHECK-DAG:      X86AndNot loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.and_not_32(int, int) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      Not       loop:<<Loop>>      outer_loop:none
+  // CHECK-NOT:      And       loop:<<Loop>>      outer_loop:none
+  // CHECK-NOT:      Not       loop:none
+  // CHECK-NOT:      And       loop:none
+  public static int and_not_32( int x, int y) {
+    int j = 1;
+    int k = 2;
+    for (int i = -64 ; i < 64; i++ ) {
+      x = x & ~i;
+      y = y | i;
+    }
+    return x & ~y;
+  }
+
+  /// CHECK-START-X86_64: int Main.reset_lowest_set_bit_32(int) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Phi     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Add     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.reset_lowest_set_bit_32(int) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop:B\d+>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.reset_lowest_set_bit_32(int) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      And                        loop:<<Loop>> outer_loop:none
+  public static int reset_lowest_set_bit_32(int x) {
+    int y = x;
+    int j = 5;
+    int k = 10;
+    int l = 20;
+    for (int i = -64 ; i < 64; i++) {
+      y = i & i-1;
+      j += y;
+      j = j & j-1;
+      k +=j;
+      k = k & k-1;
+      l +=k;
+      l = l & l-1;
+    }
+    return y + j + k + l;
+  }
+
+  /// CHECK-START-X86_64: long Main.reset_lowest_set_bit_64(long) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Phi     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:    Sub     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Sub     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Sub     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    Sub     loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:    And     loop:<<Loop>>      outer_loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.reset_lowest_set_bit_64(long) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop:B\d+>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:<<Loop>> outer_loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.reset_lowest_set_bit_64(long) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      And                        loop:<<Loop>> outer_loop:none
+  // CHECK-NOT:      Sub                        loop:<<Loop>> outer_loop:none
+  public static long reset_lowest_set_bit_64(long x) {
+    long y = x;
+    long j = 5;
+    long k = 10;
+    long l = 20;
+    for (long i = -64 ; i < 64; i++) {
+      y = i & i-1;
+      j += y;
+      j = j & j-1;
+      k +=j;
+      k = k & k-1;
+      l +=k;
+      l = l & l-1;
+    }
+    return y + j + k + l;
+  }
+
+  /// CHECK-START-X86_64: int Main.get_mask_lowest_set_bit_32(int) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Add     loop:none
+  /// CHECK-DAG:    Xor     loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.get_mask_lowest_set_bit_32(int) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: int Main.get_mask_lowest_set_bit_32(int) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      Add    loop:none
+  // CHECK-NOT:      Xor    loop:none
+  public static int get_mask_lowest_set_bit_32(int x) {
+    return (x-1) ^ x;
+  }
+
+  /// CHECK-START-X86_64: long Main.get_mask_lowest_set_bit_64(long) instruction_simplifier_x86_64 (before)
+  /// CHECK-DAG:    Sub     loop:none
+  /// CHECK-DAG:    Xor     loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.get_mask_lowest_set_bit_64(long) instruction_simplifier_x86_64 (after)
+  // CHECK-DAG:      X86MaskOrResetLeastSetBit  loop:none
+
+  // TODO:re-enable when checker supports isa features
+  // CHECK-START-X86_64: long Main.get_mask_lowest_set_bit_64(long) instruction_simplifier_x86_64 (after)
+  // CHECK-NOT:      Sub    loop:none
+  // CHECK-NOT:      Xor    loop:none
+  public static long get_mask_lowest_set_bit_64(long x) {
+    return (x-1) ^ x;
+  }
+
+  public static void main(String[] args) {
+    int x = 50;
+    int y = x/2;
+    long a = Long.MAX_VALUE;
+    long b = Long.MAX_VALUE/2;
+    assertIntEquals(0,and_not_32(x,y));
+    assertLongEquals(0L, and_not_64(a,b));
+    assertIntEquals(-20502606, reset_lowest_set_bit_32(x));
+    assertLongEquals(-20502606L, reset_lowest_set_bit_64(a));
+    assertLongEquals(-20502606L, reset_lowest_set_bit_64(b));
+    assertIntEquals(1, get_mask_lowest_set_bit_32(y));
+    assertLongEquals(1L, get_mask_lowest_set_bit_64(b));
+  }
+}
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 0fe39ee..4721eca 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -307,7 +307,6 @@
 .end method
 
 ## CHECK-START: java.lang.String TestCase.loopAndStringInitAndPhi(byte[], boolean) register (after)
-## CHECK:                        NewInstance
 ## CHECK-NOT:                    NewInstance
 ## CHECK-DAG:   <<Invoke1:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
 ## CHECK-DAG:   <<Invoke2:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
@@ -337,3 +336,140 @@
    return-object v0
 
 .end method
+
+.method public static loopAndTwoStringInitAndPhi([BZZ)Ljava/lang/String;
+   .registers 6
+
+   new-instance v0, Ljava/lang/String;
+   new-instance v2, Ljava/lang/String;
+
+   if-nez p2, :allocate_other
+
+   # Loop
+   :loop_header
+   if-eqz p1, :loop_exit
+   goto :loop_header
+
+   :loop_exit
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+
+   :allocate_other
+
+   # Loop
+   :loop_header2
+   if-eqz p1, :loop_exit2
+   goto :loop_header2
+
+   :loop_exit2
+   const-string v1, "UTF8"
+   invoke-direct {v2, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   move-object v0, v2
+
+   :exit
+   return-object v0
+
+.end method
+
+# Regression test for a new string flowing into a catch phi.
+.method public static stringAndCatch([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+
+   :try_start_a
+   new-instance v0, Ljava/lang/String;
+
+   # Loop
+   :loop_header
+   if-eqz p1, :loop_exit
+   goto :loop_header
+
+   :loop_exit
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   # Initially, we create a catch phi with the potential uninitialized string, which used to
+   # trip the compiler. However, using that catch phi is an error caught by the verifier, so
+   # having the phi is benign.
+   const v0, 0x0
+
+   :exit
+   return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor.
+.method public static stringAndCatch2([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+   new-instance v0, Ljava/lang/String;
+
+   :try_start_a
+   const-string v1, "UTF8"
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor and
+# a null test.
+.method public static stringAndCatch3([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+   new-instance v0, Ljava/lang/String;
+
+   :try_start_a
+   const-string v1, "UTF8"
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   if-eqz v0, :unexpected
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+   :unexpected
+   const-string v0, "UTF8"
+   :exit
+   return-object v0
+
+.end method
+
+# Regression test that tripped the compiler.
+.method public static stringAndPhi([BZ)Ljava/lang/Object;
+   .registers 4
+
+   new-instance v0, Ljava/lang/String;
+   const-string v1, "UTF8"
+
+   :loop_header
+   if-nez p1, :unused
+   if-eqz p1, :invoke
+   goto :loop_header
+
+   :invoke
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+
+   :unused
+   const-string v0, "UTF8"
+   if-nez p1, :exit
+   goto :unused
+
+   :exit
+   return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index df9e9dc..77a108f 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -133,6 +133,29 @@
       result = (String) m.invoke(null, new Object[] { testData, false });
       assertEqual(testString, result);
     }
+    {
+      Method m =
+          c.getMethod("loopAndTwoStringInitAndPhi", byte[].class, boolean.class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false, false });
+      assertEqual(testString, result);
+      result = (String) m.invoke(null, new Object[] { testData, false, true });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch2", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch3", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
   }
 
   public static boolean doThrow = false;
diff --git a/test/564-checker-bitcount/src/Main.java b/test/564-checker-bitcount/src/Main.java
index aad9689..e022d9d 100644
--- a/test/564-checker-bitcount/src/Main.java
+++ b/test/564-checker-bitcount/src/Main.java
@@ -21,7 +21,7 @@
   // CHECK-DAG: popcnt
 
 
-  /// CHECK-START: int Main.$noinline$BitCountBoolean(boolean) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountBoolean(boolean) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:IntegerBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountBoolean(boolean x) {
@@ -29,7 +29,7 @@
     return Integer.bitCount(x ? 1 : 0);
   }
 
-  /// CHECK-START: int Main.$noinline$BitCountByte(byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountByte(byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:IntegerBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountByte(byte x) {
@@ -37,7 +37,7 @@
     return Integer.bitCount(x);
   }
 
-  /// CHECK-START: int Main.$noinline$BitCountShort(short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountShort(short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:IntegerBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountShort(short x) {
@@ -45,7 +45,7 @@
     return Integer.bitCount(x);
   }
 
-  /// CHECK-START: int Main.$noinline$BitCountChar(char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountChar(char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:IntegerBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountChar(char x) {
@@ -53,7 +53,7 @@
     return Integer.bitCount(x);
   }
 
-  /// CHECK-START: int Main.$noinline$BitCountInt(int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountInt(int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:IntegerBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountInt(int x) {
@@ -61,7 +61,7 @@
     return Integer.bitCount(x);
   }
 
-  /// CHECK-START: int Main.$noinline$BitCountLong(long) intrinsics_recognition (after)
+  /// CHECK-START: int Main.$noinline$BitCountLong(long) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect intrinsic:LongBitCount
   /// CHECK-DAG:                      Return [<<Result>>]
   private static int $noinline$BitCountLong(long x) {
diff --git a/test/565-checker-rotate/smali/Main2.smali b/test/565-checker-rotate/smali/Main2.smali
index ca5027e..768c9d0 100644
--- a/test/565-checker-rotate/smali/Main2.smali
+++ b/test/565-checker-rotate/smali/Main2.smali
@@ -15,14 +15,13 @@
 .class public LMain2;
 .super Ljava/lang/Object;
 
-## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) intrinsics_recognition (after)
-## CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) builder (after)
 ## CHECK:         <<ArgVal:z\d+>>  ParameterValue
 ## CHECK:         <<ArgDist:i\d+>> ParameterValue
 ## CHECK-DAG:     <<Zero:i\d+>>    IntConstant 0
 ## CHECK-DAG:     <<One:i\d+>>     IntConstant 1
 ## CHECK-DAG:     <<Val:i\d+>>     Phi [<<One>>,<<Zero>>]
-## CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+## CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<Val>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
 ## CHECK-DAG:                      Return [<<Result>>]
 
 ## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
@@ -91,15 +90,14 @@
     goto :goto_3
 .end method
 
-## CHECK-START: int Main2.rotateRightBoolean(boolean, int) intrinsics_recognition (after)
-## CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) builder (after)
 ## CHECK:         <<ArgVal:z\d+>>  ParameterValue
 ## CHECK:         <<ArgDist:i\d+>> ParameterValue
 ## CHECK-DAG:     <<Zero:i\d+>>    IntConstant 0
 ## CHECK-DAG:     <<One:i\d+>>     IntConstant 1
 ## CHECK-DAG:     <<Val:i\d+>>     Phi [<<One>>,<<Zero>>]
-## CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
-## CHECK-DAG:                     Return [<<Result>>]
+## CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<Val>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
+## CHECK-DAG:                      Return [<<Result>>]
 
 ## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier (after)
 ## CHECK:         <<ArgVal:z\d+>>  ParameterValue
diff --git a/test/565-checker-rotate/src-art/Main.java b/test/565-checker-rotate/src-art/Main.java
index b9e1315..867feb8 100644
--- a/test/565-checker-rotate/src-art/Main.java
+++ b/test/565-checker-rotate/src-art/Main.java
@@ -20,11 +20,10 @@
 
   private static Class main2;
 
-  /// CHECK-START: int Main.rotateLeftByte(byte, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateLeftByte(byte, int) builder (after)
   /// CHECK:         <<ArgVal:b\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
@@ -41,11 +40,10 @@
     return Integer.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateLeftShort(short, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateLeftShort(short, int) builder (after)
   /// CHECK:         <<ArgVal:s\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
@@ -62,11 +60,10 @@
     return Integer.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateLeftChar(char, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateLeftChar(char, int) builder (after)
   /// CHECK:         <<ArgVal:c\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
@@ -83,11 +80,10 @@
     return Integer.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateLeftInt(int, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateLeftInt(int, int) builder (after)
   /// CHECK:         <<ArgVal:i\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
@@ -104,11 +100,10 @@
     return Integer.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: long Main.rotateLeftLong(long, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: long Main.rotateLeftLong(long, int) builder (after)
   /// CHECK:         <<ArgVal:j\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:j\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateLeft
+  /// CHECK-DAG:     <<Result:j\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:LongRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
@@ -125,11 +120,10 @@
     return Long.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateRightByte(byte, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateRightByte(byte, int) builder (after)
   /// CHECK:         <<ArgVal:b\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
@@ -145,11 +139,10 @@
     return Integer.rotateRight(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateRightShort(short, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateRightShort(short, int) builder (after)
   /// CHECK:         <<ArgVal:s\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
@@ -165,11 +158,10 @@
     return Integer.rotateRight(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateRightChar(char, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateRightChar(char, int) builder (after)
   /// CHECK:         <<ArgVal:c\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
@@ -185,11 +177,10 @@
     return Integer.rotateRight(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateRightInt(int, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateRightInt(int, int) builder (after)
   /// CHECK:         <<ArgVal:i\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
@@ -205,11 +196,10 @@
     return Integer.rotateRight(value, distance);
   }
 
-  /// CHECK-START: long Main.rotateRightLong(long, int) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: long Main.rotateRightLong(long, int) builder (after)
   /// CHECK:         <<ArgVal:j\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:i\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:j\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateRight
+  /// CHECK-DAG:     <<Result:j\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:LongRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
@@ -226,11 +216,10 @@
   }
 
 
-  /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) builder (after)
   /// CHECK:         <<ArgVal:i\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:b\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
@@ -247,11 +236,10 @@
     return Integer.rotateLeft(value, distance);
   }
 
-  /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) intrinsics_recognition (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+  /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) builder (after)
   /// CHECK:         <<ArgVal:i\d+>>  ParameterValue
   /// CHECK:         <<ArgDist:b\d+>> ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+  /// CHECK-DAG:     <<Result:i\d+>>  InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
   /// CHECK-DAG:                      Return [<<Result>>]
 
   /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
diff --git a/test/566-checker-signum/smali/Main2.smali b/test/566-checker-signum/smali/Main2.smali
index d99ad86..767bed2 100644
--- a/test/566-checker-signum/smali/Main2.smali
+++ b/test/566-checker-signum/smali/Main2.smali
@@ -15,12 +15,11 @@
 .class public LMain2;
 .super Ljava/lang/Object;
 
-## CHECK-START: int Main2.signBoolean(boolean) intrinsics_recognition (after)
-## CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.signBoolean(boolean) builder (after)
 ## CHECK-DAG:     <<Zero:i\d+>>   IntConstant 0
 ## CHECK-DAG:     <<One:i\d+>>    IntConstant 1
 ## CHECK-DAG:     <<Phi:i\d+>>    Phi [<<One>>,<<Zero>>]
-## CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>,<<Method>>] intrinsic:IntegerSignum
+## CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>{{(,[ij]\d+)?}}] intrinsic:IntegerSignum
 ## CHECK-DAG:                     Return [<<Result>>]
 
 ## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier (after)
diff --git a/test/566-checker-signum/src-art/Main.java b/test/566-checker-signum/src-art/Main.java
index f1e1e1b..ea01785 100644
--- a/test/566-checker-signum/src-art/Main.java
+++ b/test/566-checker-signum/src-art/Main.java
@@ -18,7 +18,7 @@
 
 public class Main {
 
-  /// CHECK-START: int Main.signByte(byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.signByte(byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -33,7 +33,7 @@
     return Integer.signum(x);
   }
 
-  /// CHECK-START: int Main.signShort(short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.signShort(short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -48,7 +48,7 @@
     return Integer.signum(x);
   }
 
-  /// CHECK-START: int Main.signChar(char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.signChar(char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -63,7 +63,7 @@
     return Integer.signum(x);
   }
 
-  /// CHECK-START: int Main.signInt(int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.signInt(int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -78,7 +78,7 @@
     return Integer.signum(x);
   }
 
-  /// CHECK-START: int Main.signLong(long) intrinsics_recognition (after)
+  /// CHECK-START: int Main.signLong(long) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongSignum
   /// CHECK-DAG:                     Return [<<Result>>]
 
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 7c1507f..00827cf 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -41,11 +41,12 @@
       header = OatQuickMethodHeader::FromEntryPoint(pc);
       break;
     } else {
+      ScopedThreadSuspension sts(soa.Self(), kSuspended);
       // Sleep to yield to the compiler thread.
       usleep(1000);
-      // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, soa.Self(), /* osr */ false);
     }
+    // Will either ensure it's compiled or do the compilation itself.
+    jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false);
   }
 
   CodeInfo info(header);
diff --git a/test/567-checker-compare/smali/Smali.smali b/test/567-checker-compare/smali/Smali.smali
index 8fc39f1..fb6d241 100644
--- a/test/567-checker-compare/smali/Smali.smali
+++ b/test/567-checker-compare/smali/Smali.smali
@@ -15,13 +15,12 @@
 .class public LSmali;
 .super Ljava/lang/Object;
 
-##  CHECK-START: int Smali.compareBooleans(boolean, boolean) intrinsics_recognition (after)
-##  CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
+##  CHECK-START: int Smali.compareBooleans(boolean, boolean) builder (after)
 ##  CHECK-DAG:     <<Zero:i\d+>>   IntConstant 0
 ##  CHECK-DAG:     <<One:i\d+>>    IntConstant 1
 ##  CHECK-DAG:     <<PhiX:i\d+>>   Phi [<<One>>,<<Zero>>]
 ##  CHECK-DAG:     <<PhiY:i\d+>>   Phi [<<One>>,<<Zero>>]
-##  CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<PhiX>>,<<PhiY>>,<<Method>>] intrinsic:IntegerCompare
+##  CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<PhiX>>,<<PhiY>>{{(,[ij]\d+)?}}] intrinsic:IntegerCompare
 ##  CHECK-DAG:                     Return [<<Result>>]
 
 ##  CHECK-START: int Smali.compareBooleans(boolean, boolean) instruction_simplifier (after)
diff --git a/test/567-checker-compare/src/Main.java b/test/567-checker-compare/src/Main.java
index abfaf9f..a3ff005 100644
--- a/test/567-checker-compare/src/Main.java
+++ b/test/567-checker-compare/src/Main.java
@@ -20,11 +20,10 @@
 
   public static boolean doThrow = false;
 
-  /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) intrinsics_recognition (after)
+  /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) builder (after)
   /// CHECK-DAG:     <<ArgX:i\d+>>   ParameterValue
-  /// CHECK-DAG:     <<Method:[ij]\d+>> CurrentMethod
   /// CHECK-DAG:     <<Zero:i\d+>>   IntConstant 0
-  /// CHECK-DAG:     <<Cmp:i\d+>>    InvokeStaticOrDirect [<<ArgX>>,<<Zero>>,<<Method>>] intrinsic:IntegerCompare
+  /// CHECK-DAG:     <<Cmp:i\d+>>    InvokeStaticOrDirect [<<ArgX>>,<<Zero>>{{(,[ij]\d+)?}}] intrinsic:IntegerCompare
   /// CHECK-DAG:                     GreaterThanOrEqual [<<Cmp>>,<<Zero>>]
 
   /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) instruction_simplifier (after)
@@ -66,7 +65,7 @@
     return (Integer) m.invoke(null, x, y);
   }
 
-  /// CHECK-START: int Main.compareBytes(byte, byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareBytes(byte, byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -81,7 +80,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareShorts(short, short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareShorts(short, short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -96,7 +95,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareChars(char, char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareChars(char, char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -111,7 +110,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareInts(int, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareInts(int, int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -126,7 +125,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareLongs(long, long) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareLongs(long, long) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -142,7 +141,7 @@
   }
 
 
-  /// CHECK-START: int Main.compareByteShort(byte, short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareByteShort(byte, short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -157,7 +156,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareByteChar(byte, char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareByteChar(byte, char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -172,7 +171,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareByteInt(byte, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareByteInt(byte, int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -188,7 +187,7 @@
   }
 
 
-  /// CHECK-START: int Main.compareShortByte(short, byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareShortByte(short, byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -203,7 +202,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareShortChar(short, char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareShortChar(short, char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -218,7 +217,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareShortInt(short, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareShortInt(short, int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -234,7 +233,7 @@
   }
 
 
-  /// CHECK-START: int Main.compareCharByte(char, byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareCharByte(char, byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -249,7 +248,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareCharShort(char, short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareCharShort(char, short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -264,7 +263,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareCharInt(char, int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareCharInt(char, int) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -280,7 +279,7 @@
   }
 
 
-  /// CHECK-START: int Main.compareIntByte(int, byte) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareIntByte(int, byte) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -295,7 +294,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareIntShort(int, short) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareIntShort(int, short) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
@@ -310,7 +309,7 @@
     return Integer.compare(x, y);
   }
 
-  /// CHECK-START: int Main.compareIntChar(int, char) intrinsics_recognition (after)
+  /// CHECK-START: int Main.compareIntChar(int, char) builder (after)
   /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
   /// CHECK-DAG:                     Return [<<Result>>]
 
diff --git a/test/568-checker-onebit/src/Main.java b/test/568-checker-onebit/src/Main.java
index 6ce4ffb..e4d3e88 100644
--- a/test/568-checker-onebit/src/Main.java
+++ b/test/568-checker-onebit/src/Main.java
@@ -16,28 +16,28 @@
 
 public class Main {
 
-  /// CHECK-START: int Main.hi32(int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.hi32(int) builder (after)
   /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerHighestOneBit
   /// CHECK-DAG:                 Return [<<Result>>]
   private static int hi32(int x) {
     return Integer.highestOneBit(x);
   }
 
-  /// CHECK-START: int Main.lo32(int) intrinsics_recognition (after)
+  /// CHECK-START: int Main.lo32(int) builder (after)
   /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerLowestOneBit
   /// CHECK-DAG:                 Return [<<Result>>]
   private static int lo32(int x) {
     return Integer.lowestOneBit(x);
   }
 
-  /// CHECK-START: long Main.hi64(long) intrinsics_recognition (after)
+  /// CHECK-START: long Main.hi64(long) builder (after)
   /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:LongHighestOneBit
   /// CHECK-DAG:                 Return [<<Result>>]
   private static long hi64(long x) {
     return Long.highestOneBit(x);
   }
 
-  /// CHECK-START: long Main.lo64(long) intrinsics_recognition (after)
+  /// CHECK-START: long Main.lo64(long) builder (after)
   /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:LongLowestOneBit
   /// CHECK-DAG:                 Return [<<Result>>]
   private static long lo64(long x) {
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index faec3c3..dc0e94c 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -23,39 +23,33 @@
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "stack_map.h"
+#include "thread-current-inl.h"
 
 namespace art {
 
-class OsrVisitor : public StackVisitor {
- public:
-  explicit OsrVisitor(Thread* thread, const char* method_name)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        method_name_(method_name),
-        in_osr_method_(false),
-        in_interpreter_(false) {}
+namespace {
 
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    std::string m_name(m->GetName());
+template <typename Handler>
+void ProcessMethodWithName(JNIEnv* env, jstring method_name, const Handler& handler) {
+  ScopedUtfChars chars(env, method_name);
+  CHECK(chars.c_str() != nullptr);
+  ScopedObjectAccess soa(Thread::Current());
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        std::string m_name(stack_visitor->GetMethod()->GetName());
 
-    if (m_name.compare(method_name_) == 0) {
-      const OatQuickMethodHeader* header =
-          Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
-      if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
-        in_osr_method_ = true;
-      } else if (IsShadowFrame()) {
-        in_interpreter_ = true;
-      }
-      return false;
-    }
-    return true;
-  }
+        if (m_name.compare(chars.c_str()) == 0) {
+          handler(stack_visitor);
+          return false;
+        }
+        return true;
+      },
+      soa.Self(),
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+}
 
-  const char* const method_name_;
-  bool in_osr_method_;
-  bool in_interpreter_;
-};
+}  // namespace
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env,
                                                             jclass,
@@ -65,12 +59,19 @@
     // Just return true for non-jit configurations to stop the infinite loop.
     return JNI_TRUE;
   }
-  ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ScopedObjectAccess soa(Thread::Current());
-  OsrVisitor visitor(soa.Self(), chars.c_str());
-  visitor.WalkStack();
-  return visitor.in_osr_method_;
+  bool in_osr_code = false;
+  ProcessMethodWithName(
+      env,
+      method_name,
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        const OatQuickMethodHeader* header =
+            Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+        if (header != nullptr && header == stack_visitor->GetCurrentOatQuickMethodHeader()) {
+          in_osr_code = true;
+        }
+      });
+  return in_osr_code;
 }
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
@@ -80,86 +81,56 @@
     // The return value is irrelevant if we're not using JIT.
     return false;
   }
-  ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ScopedObjectAccess soa(Thread::Current());
-  OsrVisitor visitor(soa.Self(), chars.c_str());
-  visitor.WalkStack();
-  return visitor.in_interpreter_;
+  bool in_interpreter = false;
+  ProcessMethodWithName(
+      env,
+      method_name,
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        const OatQuickMethodHeader* header =
+            Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+        if ((header == nullptr || header != stack_visitor->GetCurrentOatQuickMethodHeader()) &&
+            stack_visitor->IsShadowFrame()) {
+          in_interpreter = true;
+        }
+      });
+  return in_interpreter;
 }
 
-class ProfilingInfoVisitor : public StackVisitor {
- public:
-  explicit ProfilingInfoVisitor(Thread* thread, const char* method_name)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        method_name_(method_name) {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    std::string m_name(m->GetName());
-
-    if (m_name.compare(method_name_) == 0) {
-      ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
-      return false;
-    }
-    return true;
-  }
-
-  const char* const method_name_;
-};
-
 extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env,
                                                                    jclass,
                                                                    jstring method_name) {
   if (!Runtime::Current()->UseJitCompilation()) {
     return;
   }
-  ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ScopedObjectAccess soa(Thread::Current());
-  ProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
-  visitor.WalkStack();
+  ProcessMethodWithName(
+      env,
+      method_name,
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+      });
 }
 
-class OsrCheckVisitor : public StackVisitor {
- public:
-  OsrCheckVisitor(Thread* thread, const char* method_name)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        method_name_(method_name) {}
-
-  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = GetMethod();
-    std::string m_name(m->GetName());
-
-    jit::Jit* jit = Runtime::Current()->GetJit();
-    if (m_name.compare(method_name_) == 0) {
-      while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
-        // Sleep to yield to the compiler thread.
-        usleep(1000);
-        // Will either ensure it's compiled or do the compilation itself.
-        jit->CompileMethod(m, Thread::Current(), /* osr */ true);
-      }
-      return false;
-    }
-    return true;
-  }
-
-  const char* const method_name_;
-};
-
 extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env,
                                                              jclass,
                                                              jstring method_name) {
   if (!Runtime::Current()->UseJitCompilation()) {
     return;
   }
-  ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ScopedObjectAccess soa(Thread::Current());
-  OsrCheckVisitor visitor(soa.Self(), chars.c_str());
-  visitor.WalkStack();
+  ProcessMethodWithName(
+      env,
+      method_name,
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = stack_visitor->GetMethod();
+        jit::Jit* jit = Runtime::Current()->GetJit();
+        while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
+          // Sleep to yield to the compiler thread.
+          usleep(1000);
+          // Will either ensure it's compiled or do the compilation itself.
+          jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true);
+        }
+      });
 }
 
 }  // namespace art
diff --git a/test/580-checker-round/src/Main.java b/test/580-checker-round/src/Main.java
index 83bc55c..a6752b5 100644
--- a/test/580-checker-round/src/Main.java
+++ b/test/580-checker-round/src/Main.java
@@ -16,14 +16,14 @@
 
 public class Main {
 
-  /// CHECK-START: int Main.round32(float) intrinsics_recognition (after)
+  /// CHECK-START: int Main.round32(float) builder (after)
   /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathRoundFloat
   /// CHECK-DAG:                 Return [<<Result>>]
   private static int round32(float f) {
     return Math.round(f);
   }
 
-  /// CHECK-START: long Main.round64(double) intrinsics_recognition (after)
+  /// CHECK-START: long Main.round64(double) builder (after)
   /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:MathRoundDouble
   /// CHECK-DAG:                 Return [<<Result>>]
   private static long round64(double d) {
diff --git a/test/580-checker-string-fact-intrinsics/src-art/Main.java b/test/580-checker-string-fact-intrinsics/src-art/Main.java
index a2e34bf..d0750f9 100644
--- a/test/580-checker-string-fact-intrinsics/src-art/Main.java
+++ b/test/580-checker-string-fact-intrinsics/src-art/Main.java
@@ -17,9 +17,6 @@
 public class Main {
 
   /// CHECK-START: void Main.testNewStringFromBytes() builder (after)
-  /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:None
-
-  /// CHECK-START: void Main.testNewStringFromBytes() intrinsics_recognition (after)
   /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:StringNewStringFromBytes
 
   public static void testNewStringFromBytes() {
@@ -51,9 +48,6 @@
   /// CHECK-START: void Main.testNewStringFromChars() builder (after)
   /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
 
-  /// CHECK-START: void Main.testNewStringFromChars() intrinsics_recognition (after)
-  /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
-
   /// CHECK-START: void Main.testNewStringFromChars() inliner (after)
   /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
 
@@ -64,9 +58,6 @@
   }
 
   /// CHECK-START: void Main.testNewStringFromString() builder (after)
-  /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:None
-
-  /// CHECK-START: void Main.testNewStringFromString() intrinsics_recognition (after)
   /// CHECK-DAG:     InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:StringNewStringFromString
 
   public static void testNewStringFromString() {
diff --git a/test/580-crc32/expected.txt b/test/580-crc32/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/580-crc32/expected.txt
diff --git a/test/580-crc32/info.txt b/test/580-crc32/info.txt
new file mode 100644
index 0000000..24f31e0
--- /dev/null
+++ b/test/580-crc32/info.txt
@@ -0,0 +1 @@
+This test case is used to test java.util.zip.CRC32.
diff --git a/test/580-crc32/src/Main.java b/test/580-crc32/src/Main.java
new file mode 100644
index 0000000..dfc0b3c
--- /dev/null
+++ b/test/580-crc32/src/Main.java
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.zip.CRC32;
+import java.util.Random;
+import java.nio.ByteBuffer;
+
+/**
+ * The ART compiler can use intrinsics for the java.util.zip.CRC32 methods:
+ *   private native static int update(int crc, int b)
+ *   private native static int updateBytes(int crc, byte[] b, int off, int len)
+ *
+ * As the methods are private, it is not possible to check the use of intrinsics
+ * for them directly.
+ * The tests check that correct checksums are produced.
+ */
+public class Main {
+  public Main() {
+  }
+
+  public static long CRC32Byte(int value) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(value);
+    return crc32.getValue();
+  }
+
+  public static long CRC32BytesUsingUpdateInt(int... values) {
+    CRC32 crc32 = new CRC32();
+    for (int value : values) {
+      crc32.update(value);
+    }
+    return crc32.getValue();
+  }
+
+  public static void assertEqual(long expected, long actual) {
+    if (expected != actual) {
+      throw new Error("Expected: " + expected + ", found: " + actual);
+    }
+  }
+
+  private static void assertEqual(boolean expected, boolean actual) {
+    if (expected != actual) {
+      throw new Error("Expected: " + expected + ", found: " + actual);
+    }
+  }
+
+  private static void TestCRC32Update() {
+    // public void update(int b)
+    //
+    // Tests for checksums of the byte 0x0
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xD202EF8DL, CRC32Byte(0x0));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x0100));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x010000));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x01000000));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xffff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xffffff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x1200));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x123400));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x12345600));
+    assertEqual(0xD202EF8DL, CRC32Byte(Integer.MIN_VALUE));
+
+    // Tests for checksums of the byte 0x1
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xA505DF1BL, CRC32Byte(0x1));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x0101));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x010001));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x01000001));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xffff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xffffff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x1201));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x123401));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x12345601));
+
+    // Tests for checksums of the byte 0x0f
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0x42BDF21CL, CRC32Byte(0x0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x010f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x01000f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x0100000f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xffff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xffffff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x120f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x12340f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x1234560f));
+
+    // Tests for checksums of the byte 0xff
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xFF000000L, CRC32Byte(0x00ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x01ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x0100ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x010000ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x0000ffff));
+    assertEqual(0xFF000000L, CRC32Byte(0x00ffffff));
+    assertEqual(0xFF000000L, CRC32Byte(0xffffffff));
+    assertEqual(0xFF000000L, CRC32Byte(0x12ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x1234ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x123456ff));
+    assertEqual(0xFF000000L, CRC32Byte(Integer.MAX_VALUE));
+
+    // Tests for sequences
+    // Check that only the low eight bits of the values are used.
+    assertEqual(0xFF41D912L, CRC32BytesUsingUpdateInt(0, 0, 0));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0x0100, 0x010000, 0x01000000));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0xff00, 0xffff00, 0xffffff00));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0x1200, 0x123400, 0x12345600));
+
+    assertEqual(0x909FB2F2L, CRC32BytesUsingUpdateInt(1, 1, 1));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0x0101, 0x010001, 0x01000001));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0xff01, 0xffff01, 0xffffff01));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0x1201, 0x123401, 0x12345601));
+
+    assertEqual(0xE33A9F71L, CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0x010f, 0x01000f, 0x0100000f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0xff0f, 0xffff0f, 0xffffff0f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0x120f, 0x12340f, 0x1234560f));
+
+    assertEqual(0xFFFFFF00L, CRC32BytesUsingUpdateInt(0x0ff, 0x0ff, 0x0ff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x01ff, 0x0100ff, 0x010000ff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x00ffff, 0x00ffffff, 0xffffffff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x12ff, 0x1234ff, 0x123456ff));
+
+    assertEqual(0xB6CC4292L, CRC32BytesUsingUpdateInt(0x01, 0x02));
+
+    assertEqual(0xB2DE047CL,
+                CRC32BytesUsingUpdateInt(0x0, -1, Integer.MIN_VALUE, Integer.MAX_VALUE));
+  }
+
+  private static long CRC32ByteArray(byte[] bytes, int off, int len) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes, off, len);
+    return crc32.getValue();
+  }
+
+  // This is used to test that we generate correct code for constant offsets.
+  // In this case the offset is 0.
+  private static long CRC32ByteArray(byte[] bytes) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteAndByteArray(int value, byte[] bytes) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(value);
+    crc32.update(bytes);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteArrayAndByte(byte[] bytes, int value) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes);
+    crc32.update(value);
+    return crc32.getValue();
+  }
+
+  private static boolean CRC32ByteArrayThrowsAIOOBE(byte[] bytes, int off, int len) {
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(bytes, off, len);
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      return true;
+    }
+    return false;
+  }
+
+  private static boolean CRC32ByteArrayThrowsNPE() {
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(null, 0, 0);
+      return false;
+    } catch (NullPointerException e) {}
+
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(null, 1, 2);
+      return false;
+    } catch (NullPointerException e) {}
+
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update((byte[])null);
+      return false;
+    } catch (NullPointerException e) {}
+
+    return true;
+  }
+
+  private static long CRC32BytesUsingUpdateInt(byte[] bytes, int off, int len) {
+    CRC32 crc32 = new CRC32();
+    while (len-- > 0) {
+      crc32.update(bytes[off++]);
+    }
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateBytes() {
+    assertEqual(0L, CRC32ByteArray(new byte[] {}));
+    assertEqual(0L, CRC32ByteArray(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(true, CRC32ByteArrayThrowsNPE());
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, -1, 0));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, -1, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, 0, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 1, 0));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, -1, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 1, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, 0, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 10, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0, 0, 0, 0}, 2, 3));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0, 0, 0, 0}, 3, 2));
+
+    assertEqual(CRC32Byte(0), CRC32ByteArray(new byte[] {0}));
+    assertEqual(CRC32Byte(0), CRC32ByteArray(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32ByteArray(new byte[] {1}));
+    assertEqual(CRC32Byte(1), CRC32ByteArray(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteArray(new byte[] {0x0f}));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteArray(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32ByteArray(new byte[] {-1}));
+    assertEqual(CRC32Byte(0xff), CRC32ByteArray(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArray(new byte[] {0, 0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArray(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArray(new byte[] {1, 1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArray(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArray(new byte[] {0x0f, 0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArray(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArray(new byte[] {-1, -1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArray(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteArray(new byte[] {1, 2}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteArray(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArray(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArray(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteAndByteArray(0, new byte[] {0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteAndByteArray(1, new byte[] {1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteAndByteArray(0x0f, new byte[] {0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteAndByteArray(-1, new byte[] {-1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteAndByteArray(1, new byte[] {2, 3}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteAndByteArray(0, new byte[] {-1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArrayAndByte(new byte[] {0, 0}, 0));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArrayAndByte(new byte[] {1, 1}, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArrayAndByte(new byte[] {0x0f, 0x0f}, 0x0f));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArrayAndByte(new byte[] {-1, -1}, -1));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteArrayAndByte(new byte[] {1, 2}, 3));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArrayAndByte(new byte[] {0, -1, Byte.MIN_VALUE}, Byte.MAX_VALUE));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, bytes.length),
+                CRC32ByteArray(bytes));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32ByteArray(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32ByteArray(bytes, off, len));
+    }
+
+    // Check there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32ByteArray(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32ByteArray(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32ByteArray(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32ByteArray(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    off = rnd.nextInt(bytes.length - len);
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32ByteArray(bytes, off, len));
+  }
+
+  private static long CRC32ByteBuffer(byte[] bytes, int off, int len) {
+    ByteBuffer buf = ByteBuffer.wrap(bytes, 0, off + len);
+    buf.position(off);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateByteBuffer() {
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(CRC32Byte(0), CRC32ByteBuffer(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32ByteBuffer(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteBuffer(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32ByteBuffer(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteBuffer(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32ByteBuffer(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32ByteBuffer(bytes, off, len));
+    }
+
+    // Check there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32ByteBuffer(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32ByteBuffer(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32ByteBuffer(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32ByteBuffer(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    off = rnd.nextInt(bytes.length - len);
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32ByteBuffer(bytes, off, len));
+  }
+
+  private static long CRC32DirectByteBuffer(byte[] bytes, int off, int len) {
+    final int total_len = off + len;
+    ByteBuffer buf = ByteBuffer.allocateDirect(total_len).put(bytes, 0, total_len);
+    buf.position(off);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteAndDirectByteBuffer(int value, byte[] bytes) {
+    ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+    buf.position(0);
+    CRC32 crc32 = new CRC32();
+    crc32.update(value);
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static long CRC32DirectByteBufferAndByte(byte[] bytes, int value) {
+    ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+    buf.position(0);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    crc32.update(value);
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateDirectByteBuffer() {
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(CRC32Byte(0), CRC32DirectByteBuffer(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32DirectByteBuffer(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32DirectByteBuffer(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32DirectByteBuffer(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32DirectByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32DirectByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32DirectByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32DirectByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32DirectByteBuffer(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32DirectByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteAndDirectByteBuffer(0, new byte[] {0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteAndDirectByteBuffer(1, new byte[] {1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteAndDirectByteBuffer(0x0f, new byte[] {0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteAndDirectByteBuffer(-1, new byte[] {-1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteAndDirectByteBuffer(1, new byte[] {2, 3}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteAndDirectByteBuffer(0, new byte[] {-1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32DirectByteBufferAndByte(new byte[] {0, 0}, 0));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32DirectByteBufferAndByte(new byte[] {1, 1}, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32DirectByteBufferAndByte(new byte[] {0x0f, 0x0f}, 0x0f));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32DirectByteBufferAndByte(new byte[] {-1, -1}, -1));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32DirectByteBufferAndByte(new byte[] {1, 2}, 3));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32DirectByteBufferAndByte(new byte[] {0, -1, Byte.MIN_VALUE}, Byte.MAX_VALUE));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, bytes.length),
+                CRC32DirectByteBuffer(bytes, 0, bytes.length));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32DirectByteBuffer(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32DirectByteBuffer(bytes, off, len));
+    }
+
+    // Check there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32DirectByteBuffer(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32DirectByteBuffer(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32DirectByteBuffer(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32DirectByteBuffer(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    off = rnd.nextInt(bytes.length - len);
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32DirectByteBuffer(bytes, off, len));
+  }
+
+  public static void main(String args[]) {
+    TestCRC32Update();
+    TestCRC32UpdateBytes();
+    TestCRC32UpdateByteBuffer();
+    TestCRC32UpdateDirectByteBuffer();
+  }
+}
diff --git a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
index f74e88f..bd90fe7 100644
--- a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
+++ b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
@@ -210,14 +210,12 @@
 .end method
 
 ## CHECK-START: int SmaliTests.longToIntOfBoolean() builder (after)
-## CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
 ## CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
-## CHECK-DAG:     <<ZToJ:j\d+>>          InvokeStaticOrDirect [<<Sget>>,<<Method>>]
+## CHECK-DAG:     <<ZToJ:j\d+>>          InvokeStaticOrDirect [<<Sget>>{{(,[ij]\d+)?}}]
 ## CHECK-DAG:     <<JToI:i\d+>>          TypeConversion [<<ZToJ>>]
 ## CHECK-DAG:                            Return [<<JToI>>]
 
 ## CHECK-START: int SmaliTests.longToIntOfBoolean() inliner (after)
-## CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
 ## CHECK-DAG:     <<Zero:i\d+>>          IntConstant 0
 ## CHECK-DAG:     <<One:i\d+>>           IntConstant 1
 ## CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
@@ -228,7 +226,6 @@
 ## CHECK-DAG:                            Return [<<JToI>>]
 
 ## CHECK-START: int SmaliTests.longToIntOfBoolean() select_generator (after)
-## CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
 ## CHECK-DAG:     <<Zero:i\d+>>          IntConstant 0
 ## CHECK-DAG:     <<One:i\d+>>           IntConstant 1
 ## CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
@@ -236,7 +233,6 @@
 ## CHECK-DAG:                            Return [<<Sel>>]
 
 ## CHECK-START: int SmaliTests.longToIntOfBoolean() instruction_simplifier$after_bce (after)
-## CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
 ## CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
 ## CHECK-DAG:                            Return [<<Sget>>]
 .method public static longToIntOfBoolean()I
diff --git a/test/593-checker-boolean-2-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java
index fdc0919..b085c42 100644
--- a/test/593-checker-boolean-2-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-2-integral-conv/src/Main.java
@@ -100,14 +100,12 @@
   }
 
   /// CHECK-START: int Main.longToIntOfBoolean() builder (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
   /// CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
-  /// CHECK-DAG:     <<ZToJ:j\d+>>          InvokeStaticOrDirect [<<Sget>>,<<Method>>]
+  /// CHECK-DAG:     <<ZToJ:j\d+>>          InvokeStaticOrDirect [<<Sget>>{{(,[ij]\d+)?}}]
   /// CHECK-DAG:     <<JToI:i\d+>>          TypeConversion [<<ZToJ>>]
   /// CHECK-DAG:                            Return [<<JToI>>]
 
   /// CHECK-START: int Main.longToIntOfBoolean() inliner (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
   /// CHECK-DAG:     <<Zero:j\d+>>          LongConstant 0
   /// CHECK-DAG:     <<One:j\d+>>           LongConstant 1
   /// CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
@@ -123,7 +121,6 @@
   /// CHECK-NOT:                            Phi
 
   /// CHECK-START: int Main.longToIntOfBoolean() select_generator (after)
-  /// CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
   /// CHECK-DAG:     <<Zero:j\d+>>          LongConstant 0
   /// CHECK-DAG:     <<One:j\d+>>           LongConstant 1
   /// CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
@@ -135,7 +132,6 @@
   // TODO: Re-enable checks below after simplifier is updated to handle this pattern: b/63064517
 
   // CHECK-START: int Main.longToIntOfBoolean() instruction_simplifier$after_bce (after)
-  // CHECK-DAG:     <<Method:[ij]\d+>>     CurrentMethod
   // CHECK-DAG:     <<Sget:z\d+>>          StaticFieldGet
   // CHECK-DAG:                            Return [<<Sget>>]
 
diff --git a/test/602-deoptimizeable/src/Main.java b/test/602-deoptimizeable/src/Main.java
index d995923..46584b0 100644
--- a/test/602-deoptimizeable/src/Main.java
+++ b/test/602-deoptimizeable/src/Main.java
@@ -33,10 +33,7 @@
 
     public int hashCode() {
         sHashCodeInvoked = true;
-        Main.assertIsManaged();
         Main.deoptimizeAll();
-        Main.assertIsInterpreted();
-        Main.assertCallerIsManaged();  // Caller is from framework code HashMap.
         return i % 64;
     }
 }
@@ -46,13 +43,6 @@
 
     public static native void deoptimizeAll();
     public static native void undeoptimizeAll();
-    public static native void assertIsInterpreted();
-    public static native void assertIsManaged();
-    public static native void assertCallerIsInterpreted();
-    public static native void assertCallerIsManaged();
-    public static native void disableStackFrameAsserts();
-    public static native boolean hasOatFile();
-    public static native boolean isInterpreted();
 
     public static void execute(Runnable runnable) throws Exception {
       Thread t = new Thread(runnable);
@@ -62,19 +52,13 @@
 
     public static void main(String[] args) throws Exception {
         System.loadLibrary(args[0]);
-        // Only test stack frames in compiled mode.
-        if (!hasOatFile() || isInterpreted()) {
-          disableStackFrameAsserts();
-        }
         final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
 
         // Single-frame deoptimization that covers partial fragment.
         execute(new Runnable() {
             public void run() {
                 int[] arr = new int[3];
-                assertIsManaged();
                 int res = $noinline$run1(arr);
-                assertIsManaged();  // Only single frame is deoptimized.
                 if (res != 79) {
                     System.out.println("Failure 1!");
                     System.exit(0);
@@ -87,13 +71,11 @@
             public void run() {
                 try {
                     int[] arr = new int[3];
-                    assertIsManaged();
                     // Use reflection to call $noinline$run2 so that it does
                     // full-fragment deoptimization since that is an upcall.
                     Class<?> cls = Class.forName("Main");
                     Method method = cls.getDeclaredMethod("$noinline$run2", int[].class);
                     double res = (double)method.invoke(Main.class, arr);
-                    assertIsManaged();  // Only single frame is deoptimized.
                     if (res != 79.3d) {
                         System.out.println("Failure 2!");
                         System.exit(0);
@@ -107,9 +89,7 @@
         // Full-fragment deoptimization.
         execute(new Runnable() {
             public void run() {
-                assertIsManaged();
                 float res = $noinline$run3B();
-                assertIsInterpreted();  // Every deoptimizeable method is deoptimized.
                 if (res != 0.034f) {
                     System.out.println("Failure 3!");
                     System.exit(0);
@@ -123,9 +103,10 @@
         execute(new Runnable() {
             public void run() {
                 try {
-                    assertIsManaged();
                     map.put(new DummyObject(10), Long.valueOf(100));
-                    assertIsInterpreted();  // Every deoptimizeable method is deoptimized.
+                    if (map.get(new DummyObject(10)) == null) {
+                        System.out.println("Expected map to contain DummyObject(10)");
+                    }
                 } catch (Exception e) {
                     e.printStackTrace(System.out);
                 }
@@ -144,7 +125,6 @@
     }
 
     public static int $noinline$run1(int[] arr) {
-        assertIsManaged();
         // Prevent inlining.
         if (sFlag) {
             throw new Error();
@@ -158,18 +138,15 @@
             // This causes AIOOBE and triggers deoptimization from compiled code.
             arr[3] = 1;
         } catch (ArrayIndexOutOfBoundsException e) {
-            assertIsInterpreted(); // Single-frame deoptimization triggered.
             caught = true;
         }
         if (!caught) {
             System.out.println("Expected exception");
         }
-        assertIsInterpreted();
         return 79;
     }
 
     public static double $noinline$run2(int[] arr) {
-        assertIsManaged();
         // Prevent inlining.
         if (sFlag) {
             throw new Error();
@@ -183,37 +160,30 @@
             // This causes AIOOBE and triggers deoptimization from compiled code.
             arr[3] = 1;
         } catch (ArrayIndexOutOfBoundsException e) {
-            assertIsInterpreted();  // Single-frame deoptimization triggered.
             caught = true;
         }
         if (!caught) {
             System.out.println("Expected exception");
         }
-        assertIsInterpreted();
         return 79.3d;
     }
 
     public static float $noinline$run3A() {
-        assertIsManaged();
         // Prevent inlining.
         if (sFlag) {
             throw new Error();
         }
         // Deoptimize callers.
         deoptimizeAll();
-        assertIsInterpreted();
-        assertCallerIsInterpreted();  // $noinline$run3B is deoptimizeable.
         return 0.034f;
     }
 
     public static float $noinline$run3B() {
-        assertIsManaged();
         // Prevent inlining.
         if (sFlag) {
             throw new Error();
         }
         float res = $noinline$run3A();
-        assertIsInterpreted();
         return res;
     }
 }
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b5166ce..f9d3874 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -19,6 +19,7 @@
 #include <iostream>
 
 #include "art_method.h"
+#include "base/casts.h"
 #include "class_linker.h"
 #include "jit/jit.h"
 #include "linear_alloc.h"
@@ -51,13 +52,13 @@
                                                           jobject java_method) {
   ScopedObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, java_method);
-  return static_cast<jlong>(reinterpret_cast<uintptr_t>(method));
+  return reinterpret_cast64<jlong>(method);
 }
 
 extern "C" JNIEXPORT void JNICALL Java_Main_reuseArenaOfMethod(JNIEnv*,
                                                                jclass,
                                                                jlong art_method) {
-  void* ptr = reinterpret_cast<void*>(static_cast<uintptr_t>(art_method));
+  void* ptr = reinterpret_cast64<void*>(art_method);
 
   ReaderMutexLock mu(Thread::Current(), *Locks::mutator_lock_);
   ReaderMutexLock mu2(Thread::Current(), *Locks::classlinker_classes_lock_);
diff --git a/test/622-checker-bce-regressions/src/Main.java b/test/622-checker-bce-regressions/src/Main.java
index 6ba2644..595ade8 100644
--- a/test/622-checker-bce-regressions/src/Main.java
+++ b/test/622-checker-bce-regressions/src/Main.java
@@ -42,8 +42,22 @@
     return j;
   }
 
+  static public void $noinline$regressionTest123284765(String str) {
+    try {
+      int l = str.length();
+      if (l == 34) {
+        str.charAt(l);
+        fail();
+      }
+    } catch (StringIndexOutOfBoundsException expected) {
+      expectEquals(34, str.length());
+    }
+  }
+
   public static void main(String[] args) {
     expectEquals(8, doNotVisitAfterForwardBCE(array));
+    $noinline$regressionTest123284765("0123456789012345678901234567890123");
+    $noinline$regressionTest123284765("012345678901");
     System.out.println("passed");
   }
 
@@ -52,4 +66,8 @@
       throw new Error("Expected: " + expected + ", found: " + result);
     }
   }
+
+  private static void fail() {
+    throw new Error("FAIL");
+  }
 }
diff --git a/test/624-checker-stringops/smali/Smali.smali b/test/624-checker-stringops/smali/Smali.smali
index 7b063c0..f8b9275 100644
--- a/test/624-checker-stringops/smali/Smali.smali
+++ b/test/624-checker-stringops/smali/Smali.smali
@@ -16,23 +16,13 @@
 .class public LSmali;
 .super Ljava/lang/Object;
 
-##  CHECK-START: int Smali.bufferLen2() instruction_simplifier (before)
+##  CHECK-START: int Smali.bufferLen2() builder (after)
 ##  CHECK-DAG: <<New:l\d+>>     NewInstance
 ##  CHECK-DAG: <<String1:l\d+>> LoadString
-##  CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]   intrinsic:StringBufferAppend
+##  CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]     intrinsic:StringBufferAppend
 ##  CHECK-DAG: <<String2:l\d+>> LoadString
-##  CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<Append1>>]
-##  CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null1>>,<<String2>>] intrinsic:StringBufferAppend
-##  CHECK-DAG: <<Null2:l\d+>>   NullCheck     [<<Append2>>]
-##  CHECK-DAG:                  InvokeVirtual [<<Null2>>]             intrinsic:StringBufferLength
-
-##  CHECK-START: int Smali.bufferLen2() instruction_simplifier (after)
-##  CHECK-DAG: <<New:l\d+>>     NewInstance
-##  CHECK-DAG: <<String1:l\d+>> LoadString
-##  CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
-##  CHECK-DAG: <<String2:l\d+>> LoadString
-##  CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBufferAppend
-##  CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBufferLength
+##  CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBufferAppend
+##  CHECK-DAG:                  InvokeVirtual [<<Append2>>]             intrinsic:StringBufferLength
 .method public static bufferLen2()I
     .registers 3
 
@@ -44,7 +34,7 @@
     invoke-virtual {v0, v1}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
     move-result-object v1
 
-    const-string v2, "x"
+    const-string v2, "y"
     invoke-virtual {v1, v2}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
     move-result-object v1
 
@@ -57,12 +47,10 @@
 ## CHECK-START: int Smali.builderLen2() instruction_simplifier (before)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance
 ## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]   intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]     intrinsic:StringBuilderAppend
 ## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Null2:l\d+>>   NullCheck     [<<Append1>>]
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend
-## CHECK-DAG: <<Null3:l\d+>>   NullCheck     [<<Append2>>]
-## CHECK-DAG:                  InvokeVirtual [<<Null3>>]             intrinsic:StringBuilderLength
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG:                  InvokeVirtual [<<Append2>>]             intrinsic:StringBuilderLength
 
 ## CHECK-START: int Smali.builderLen2() instruction_simplifier (after)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance
@@ -82,7 +70,7 @@
     invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
     move-result-object v1
 
-    const-string v2, "x"
+    const-string v2, "y"
     invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
     move-result-object v1
 
@@ -93,18 +81,16 @@
 .end method
 
 ## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (before)
-## CHECK-DAG: <<New:l\d+>>     NewInstance                                                         loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString                                                          loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                             loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend  loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString                                                          loop:<<Loop>>
-## CHECK-DAG: <<Null2:l\d+>>   NullCheck     [<<Append1>>]                                         loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBufferAppend  loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString                                                          loop:<<Loop>>
-## CHECK-DAG: <<Null3:l\d+>>   NullCheck     [<<Append2>>]                                         loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBufferAppend  loop:<<Loop>>
-## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                             loop:none
-## CHECK-DAG:                  InvokeVirtual [<<Null4>>]             intrinsic:StringBufferLength  loop:none
+## CHECK-DAG: <<New:l\d+>>     NewInstance                                                           loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString                                                            loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                               loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend    loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString                                                            loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBufferAppend  loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString                                                            loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBufferAppend  loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                               loop:none
+## CHECK-DAG:                  InvokeVirtual [<<Null4>>]             intrinsic:StringBufferLength    loop:none
 
 ## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (after)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance                                                       loop:none
@@ -152,18 +138,16 @@
 .end method
 
 ## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (before)
-## CHECK-DAG: <<New:l\d+>>     NewInstance                                                         loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString                                                          loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                             loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString                                                          loop:<<Loop>>
-## CHECK-DAG: <<Null2:l\d+>>   NullCheck     [<<Append1>>]                                         loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString                                                          loop:<<Loop>>
-## CHECK-DAG: <<Null3:l\d+>>   NullCheck     [<<Append2>>]                                         loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                             loop:none
-## CHECK-DAG:                  InvokeVirtual [<<Null4>>]             intrinsic:StringBuilderLength loop:none
+## CHECK-DAG: <<New:l\d+>>     NewInstance                                                           loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString                                                            loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                               loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>]   intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString                                                            loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString                                                            loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                               loop:none
+## CHECK-DAG:                  InvokeVirtual [<<Null4>>]               intrinsic:StringBuilderLength loop:none
 
 ## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (after)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance                                                       loop:none
diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java
index 3aa6e56..f52d81a 100644
--- a/test/624-checker-stringops/src/Main.java
+++ b/test/624-checker-stringops/src/Main.java
@@ -120,7 +120,7 @@
   /// CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBufferLength
   static int bufferLen2() {
     StringBuffer s = new StringBuffer();
-    return s.append("x").append("x").length();
+    return s.append("x").append("y").length();
   }
 
   static int bufferLen2Smali() throws Exception {
@@ -150,7 +150,7 @@
   /// CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength
   static int builderLen2() {
     StringBuilder s = new StringBuilder();
-    return s.append("x").append("x").length();
+    return s.append("x").append("y").length();
   }
 
   static int builderLen2Smali() throws Exception {
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index 96ef266..82c82c6 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,7 @@
  */
 
 #include "jni.h"
+#include "handle_scope-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
@@ -40,6 +41,7 @@
   if (status == ClassStatus::kResolved) {
     ObjectLock<mirror::Class> lock(soa.Self(), klass);
     klass->SetStatus(klass, ClassStatus::kVerified, soa.Self());
+    klass->SetVerificationAttempted();
   } else {
     LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status;
   }
diff --git a/test/631-checker-fp-abs/src/Main.java b/test/631-checker-fp-abs/src/Main.java
index 2db93b8..2d04e36 100644
--- a/test/631-checker-fp-abs/src/Main.java
+++ b/test/631-checker-fp-abs/src/Main.java
@@ -31,7 +31,7 @@
 
   public static boolean doThrow = false;
 
-  /// CHECK-START: float Main.$opt$noinline$absSP(float) intrinsics_recognition (after)
+  /// CHECK-START: float Main.$opt$noinline$absSP(float) builder (after)
   /// CHECK-DAG: <<Result:f\d+>> InvokeStaticOrDirect intrinsic:MathAbsFloat
   /// CHECK-DAG:                 Return [<<Result>>]
   private static float $opt$noinline$absSP(float f) {
@@ -41,7 +41,7 @@
     return Math.abs(f);
   }
 
-  /// CHECK-START: double Main.$opt$noinline$absDP(double) intrinsics_recognition (after)
+  /// CHECK-START: double Main.$opt$noinline$absDP(double) builder (after)
   /// CHECK-DAG: <<Result:d\d+>> InvokeStaticOrDirect intrinsic:MathAbsDouble
   /// CHECK-DAG:                 Return [<<Result>>]
   private static double $opt$noinline$absDP(double d) {
diff --git a/test/638-checker-inline-cache-intrinsic/src/Main.java b/test/638-checker-inline-cache-intrinsic/src/Main.java
index 472cbf6..4a9aba5 100644
--- a/test/638-checker-inline-cache-intrinsic/src/Main.java
+++ b/test/638-checker-inline-cache-intrinsic/src/Main.java
@@ -52,11 +52,11 @@
 
   /// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) inliner (after)
   /// CHECK:       Deoptimize
-  /// CHECK:       InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+  /// CHECK:       InvokeVirtual method_name:java.lang.String.equals intrinsic:StringEquals
 
   /// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) instruction_simplifier$after_inlining (after)
   /// CHECK:       Deoptimize
-  /// CHECK:       InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+  /// CHECK:       InvokeVirtual method_name:java.lang.String.equals intrinsic:StringEquals
 
   public static boolean $noinline$stringEquals(Object obj) {
     return obj.equals("def");
diff --git a/test/674-hiddenapi/api-blacklist.txt b/test/674-hiddenapi/api-blacklist.txt
deleted file mode 100644
index 4a67fb8..0000000
--- a/test/674-hiddenapi/api-blacklist.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-LNullaryConstructorBlacklist;-><init>()V
-LParentClass;->fieldPublicBlacklist:I
-LParentClass;->fieldPublicBlacklistB:I
-LParentClass;->fieldPackageBlacklist:I
-LParentClass;->fieldProtectedBlacklist:I
-LParentClass;->fieldPrivateBlacklist:I
-LParentClass;->fieldPublicStaticBlacklist:I
-LParentClass;->fieldPublicStaticBlacklistB:I
-LParentClass;->fieldPackageStaticBlacklist:I
-LParentClass;->fieldProtectedStaticBlacklist:I
-LParentClass;->fieldPrivateStaticBlacklist:I
-LParentClass;->methodPublicBlacklist()I
-LParentClass;->methodPackageBlacklist()I
-LParentClass;->methodProtectedBlacklist()I
-LParentClass;->methodPrivateBlacklist()I
-LParentClass;->methodPublicStaticBlacklist()I
-LParentClass;->methodPackageStaticBlacklist()I
-LParentClass;->methodProtectedStaticBlacklist()I
-LParentClass;->methodPrivateStaticBlacklist()I
-LParentClass;-><init>(IC)V
-LParentClass;-><init>(FC)V
-LParentClass;-><init>(JC)V
-LParentClass;-><init>(DC)V
-LParentInterface;->fieldPublicStaticBlacklist:I
-LParentInterface;->methodPublicBlacklist()I
-LParentInterface;->methodPublicStaticBlacklist()I
-LParentInterface;->methodPublicDefaultBlacklist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/api-dark-greylist.txt b/test/674-hiddenapi/api-dark-greylist.txt
deleted file mode 100644
index e010a0a..0000000
--- a/test/674-hiddenapi/api-dark-greylist.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-LNullaryConstructorDarkGreylist;-><init>()V
-LParentClass;->fieldPublicDarkGreylist:I
-LParentClass;->fieldPublicDarkGreylistB:I
-LParentClass;->fieldPackageDarkGreylist:I
-LParentClass;->fieldProtectedDarkGreylist:I
-LParentClass;->fieldPrivateDarkGreylist:I
-LParentClass;->fieldPublicStaticDarkGreylist:I
-LParentClass;->fieldPublicStaticDarkGreylistB:I
-LParentClass;->fieldPackageStaticDarkGreylist:I
-LParentClass;->fieldProtectedStaticDarkGreylist:I
-LParentClass;->fieldPrivateStaticDarkGreylist:I
-LParentClass;->methodPublicDarkGreylist()I
-LParentClass;->methodPackageDarkGreylist()I
-LParentClass;->methodProtectedDarkGreylist()I
-LParentClass;->methodPrivateDarkGreylist()I
-LParentClass;->methodPublicStaticDarkGreylist()I
-LParentClass;->methodPackageStaticDarkGreylist()I
-LParentClass;->methodProtectedStaticDarkGreylist()I
-LParentClass;->methodPrivateStaticDarkGreylist()I
-LParentClass;-><init>(IB)V
-LParentClass;-><init>(FB)V
-LParentClass;-><init>(JB)V
-LParentClass;-><init>(DB)V
-LParentInterface;->fieldPublicStaticDarkGreylist:I
-LParentInterface;->methodPublicDarkGreylist()I
-LParentInterface;->methodPublicStaticDarkGreylist()I
-LParentInterface;->methodPublicDefaultDarkGreylist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/api-light-greylist.txt b/test/674-hiddenapi/api-light-greylist.txt
deleted file mode 100644
index 4be793f..0000000
--- a/test/674-hiddenapi/api-light-greylist.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-LNullaryConstructorLightGreylist;-><init>()V
-LParentClass;->fieldPublicLightGreylist:I
-LParentClass;->fieldPublicLightGreylistB:I
-LParentClass;->fieldPackageLightGreylist:I
-LParentClass;->fieldProtectedLightGreylist:I
-LParentClass;->fieldPrivateLightGreylist:I
-LParentClass;->fieldPublicStaticLightGreylist:I
-LParentClass;->fieldPublicStaticLightGreylistB:I
-LParentClass;->fieldPackageStaticLightGreylist:I
-LParentClass;->fieldProtectedStaticLightGreylist:I
-LParentClass;->fieldPrivateStaticLightGreylist:I
-LParentClass;->methodPublicLightGreylist()I
-LParentClass;->methodPackageLightGreylist()I
-LParentClass;->methodProtectedLightGreylist()I
-LParentClass;->methodPrivateLightGreylist()I
-LParentClass;->methodPublicStaticLightGreylist()I
-LParentClass;->methodPackageStaticLightGreylist()I
-LParentClass;->methodProtectedStaticLightGreylist()I
-LParentClass;->methodPrivateStaticLightGreylist()I
-LParentClass;-><init>(IZ)V
-LParentClass;-><init>(FZ)V
-LParentClass;-><init>(JZ)V
-LParentClass;-><init>(DZ)V
-LParentInterface;->fieldPublicStaticLightGreylist:I
-LParentInterface;->methodPublicLightGreylist()I
-LParentInterface;->methodPublicStaticLightGreylist()I
-LParentInterface;->methodPublicDefaultLightGreylist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/hiddenapi-flags.csv b/test/674-hiddenapi/hiddenapi-flags.csv
new file mode 100644
index 0000000..42626f7
--- /dev/null
+++ b/test/674-hiddenapi/hiddenapi-flags.csv
@@ -0,0 +1,108 @@
+LNullaryConstructorBlacklistAndCorePlatformApi;-><init>()V,blacklist,core-platform-api
+LNullaryConstructorBlacklist;-><init>()V,blacklist
+LNullaryConstructorDarkGreylist;-><init>()V,greylist-max-o
+LNullaryConstructorLightGreylist;-><init>()V,greylist
+LParentClass;->fieldPackageBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPackageBlacklist:I,blacklist
+LParentClass;->fieldPackageDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPackageLightGreylist:I,greylist
+LParentClass;->fieldPackageStaticBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPackageStaticBlacklist:I,blacklist
+LParentClass;->fieldPackageStaticDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPackageStaticLightGreylist:I,greylist
+LParentClass;->fieldPrivateBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPrivateBlacklist:I,blacklist
+LParentClass;->fieldPrivateDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPrivateLightGreylist:I,greylist
+LParentClass;->fieldPrivateStaticBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPrivateStaticBlacklist:I,blacklist
+LParentClass;->fieldPrivateStaticDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPrivateStaticLightGreylist:I,greylist
+LParentClass;->fieldProtectedBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldProtectedBlacklist:I,blacklist
+LParentClass;->fieldProtectedDarkGreylist:I,greylist-max-o
+LParentClass;->fieldProtectedLightGreylist:I,greylist
+LParentClass;->fieldProtectedStaticBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldProtectedStaticBlacklist:I,blacklist
+LParentClass;->fieldProtectedStaticDarkGreylist:I,greylist-max-o
+LParentClass;->fieldProtectedStaticLightGreylist:I,greylist
+LParentClass;->fieldPublicBlacklistAndCorePlatformApiB:I,blacklist,core-platform-api
+LParentClass;->fieldPublicBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPublicBlacklistB:I,blacklist
+LParentClass;->fieldPublicBlacklist:I,blacklist
+LParentClass;->fieldPublicDarkGreylistB:I,greylist-max-o
+LParentClass;->fieldPublicDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPublicLightGreylistB:I,greylist
+LParentClass;->fieldPublicLightGreylist:I,greylist
+LParentClass;->fieldPublicStaticBlacklistAndCorePlatformApiB:I,blacklist,core-platform-api
+LParentClass;->fieldPublicStaticBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentClass;->fieldPublicStaticBlacklistB:I,blacklist
+LParentClass;->fieldPublicStaticBlacklist:I,blacklist
+LParentClass;->fieldPublicStaticDarkGreylistB:I,greylist-max-o
+LParentClass;->fieldPublicStaticDarkGreylist:I,greylist-max-o
+LParentClass;->fieldPublicStaticLightGreylistB:I,greylist
+LParentClass;->fieldPublicStaticLightGreylist:I,greylist
+LParentClass;-><init>(DB)V,greylist-max-o
+LParentClass;-><init>(DC)V,blacklist
+LParentClass;-><init>(DI)V,blacklist,core-platform-api
+LParentClass;-><init>(DZ)V,greylist
+LParentClass;-><init>(FB)V,greylist-max-o
+LParentClass;-><init>(FC)V,blacklist
+LParentClass;-><init>(FI)V,blacklist,core-platform-api
+LParentClass;-><init>(FZ)V,greylist
+LParentClass;-><init>(IB)V,greylist-max-o
+LParentClass;-><init>(IC)V,blacklist
+LParentClass;-><init>(II)V,blacklist,core-platform-api
+LParentClass;-><init>(IZ)V,greylist
+LParentClass;-><init>(JB)V,greylist-max-o
+LParentClass;-><init>(JC)V,blacklist
+LParentClass;-><init>(JI)V,blacklist,core-platform-api
+LParentClass;-><init>(JZ)V,greylist
+LParentClass;->methodPackageBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPackageBlacklist()I,blacklist
+LParentClass;->methodPackageDarkGreylist()I,greylist-max-o
+LParentClass;->methodPackageLightGreylist()I,greylist
+LParentClass;->methodPackageStaticBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPackageStaticBlacklist()I,blacklist
+LParentClass;->methodPackageStaticDarkGreylist()I,greylist-max-o
+LParentClass;->methodPackageStaticLightGreylist()I,greylist
+LParentClass;->methodPrivateBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPrivateBlacklist()I,blacklist
+LParentClass;->methodPrivateDarkGreylist()I,greylist-max-o
+LParentClass;->methodPrivateLightGreylist()I,greylist
+LParentClass;->methodPrivateStaticBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPrivateStaticBlacklist()I,blacklist
+LParentClass;->methodPrivateStaticDarkGreylist()I,greylist-max-o
+LParentClass;->methodPrivateStaticLightGreylist()I,greylist
+LParentClass;->methodProtectedBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodProtectedBlacklist()I,blacklist
+LParentClass;->methodProtectedDarkGreylist()I,greylist-max-o
+LParentClass;->methodProtectedLightGreylist()I,greylist
+LParentClass;->methodProtectedStaticBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodProtectedStaticBlacklist()I,blacklist
+LParentClass;->methodProtectedStaticDarkGreylist()I,greylist-max-o
+LParentClass;->methodProtectedStaticLightGreylist()I,greylist
+LParentClass;->methodPublicBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPublicBlacklist()I,blacklist
+LParentClass;->methodPublicDarkGreylist()I,greylist-max-o
+LParentClass;->methodPublicLightGreylist()I,greylist
+LParentClass;->methodPublicStaticBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentClass;->methodPublicStaticBlacklist()I,blacklist
+LParentClass;->methodPublicStaticDarkGreylist()I,greylist-max-o
+LParentClass;->methodPublicStaticLightGreylist()I,greylist
+LParentInterface;->fieldPublicStaticBlacklistAndCorePlatformApi:I,blacklist,core-platform-api
+LParentInterface;->fieldPublicStaticBlacklist:I,blacklist
+LParentInterface;->fieldPublicStaticDarkGreylist:I,greylist-max-o
+LParentInterface;->fieldPublicStaticLightGreylist:I,greylist
+LParentInterface;->methodPublicBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentInterface;->methodPublicBlacklist()I,blacklist
+LParentInterface;->methodPublicDarkGreylist()I,greylist-max-o
+LParentInterface;->methodPublicDefaultBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentInterface;->methodPublicDefaultBlacklist()I,blacklist
+LParentInterface;->methodPublicDefaultDarkGreylist()I,greylist-max-o
+LParentInterface;->methodPublicDefaultLightGreylist()I,greylist
+LParentInterface;->methodPublicLightGreylist()I,greylist
+LParentInterface;->methodPublicStaticBlacklistAndCorePlatformApi()I,blacklist,core-platform-api
+LParentInterface;->methodPublicStaticBlacklist()I,blacklist
+LParentInterface;->methodPublicStaticDarkGreylist()I,greylist-max-o
+LParentInterface;->methodPublicStaticLightGreylist()I,greylist
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 96754c3..3dc2789 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "base/sdk_version.h"
 #include "class_linker.h"
 #include "dex/art_dex_file_loader.h"
 #include "hidden_api.h"
@@ -26,39 +27,67 @@
 namespace art {
 namespace Test674HiddenApi {
 
+std::vector<std::vector<std::unique_ptr<const DexFile>>> opened_dex_files;
+
 extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
   Runtime* runtime = Runtime::Current();
-  runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kBlacklistOnly);
+  runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime->SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
   runtime->SetDedupeHiddenApiWarnings(false);
-  runtime->AlwaysSetHiddenApiWarningFlag();
 }
 
-extern "C" JNIEXPORT void JNICALL Java_Main_appendToBootClassLoader(
-    JNIEnv* env, jclass, jstring jpath) {
+extern "C" JNIEXPORT void JNICALL Java_Main_setDexDomain(
+    JNIEnv*, jclass, jint int_index, jboolean is_core_platform) {
+  size_t index = static_cast<size_t>(int_index);
+  CHECK_LT(index, opened_dex_files.size());
+  for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files[index]) {
+    const_cast<DexFile*>(dex_file.get())->SetHiddenapiDomain(
+        (is_core_platform == JNI_FALSE) ? hiddenapi::Domain::kPlatform
+                                        : hiddenapi::Domain::kCorePlatform);
+  }
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_appendToBootClassLoader(
+    JNIEnv* env, jclass klass, jstring jpath, jboolean is_core_platform) {
   ScopedUtfChars utf(env, jpath);
   const char* path = utf.c_str();
-  if (path == nullptr) {
-    return;
-  }
+  CHECK(path != nullptr);
+
+  const size_t index = opened_dex_files.size();
+  const jint int_index = static_cast<jint>(index);
+  opened_dex_files.push_back(std::vector<std::unique_ptr<const DexFile>>());
 
   ArtDexFileLoader dex_loader;
   std::string error_msg;
-  std::vector<std::unique_ptr<const DexFile>> dex_files;
+
   if (!dex_loader.Open(path,
                        path,
                        /* verify */ false,
                        /* verify_checksum */ true,
                        &error_msg,
-                       &dex_files)) {
+                       &opened_dex_files[index])) {
     LOG(FATAL) << "Could not open " << path << " for boot classpath extension: " << error_msg;
     UNREACHABLE();
   }
 
+  Java_Main_setDexDomain(env, klass, int_index, is_core_platform);
+
   ScopedObjectAccess soa(Thread::Current());
-  for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    Runtime::Current()->GetClassLinker()->AppendToBootClassPath(
-        Thread::Current(), *dex_file.release());
+  for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files[index]) {
+    Runtime::Current()->GetClassLinker()->AppendToBootClassPath(Thread::Current(), *dex_file.get());
   }
+
+  return int_index;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setWhitelistAll(JNIEnv*, jclass, jboolean value) {
+  std::vector<std::string> exemptions;
+  if (value != JNI_FALSE) {
+    exemptions.push_back("L");
+  }
+  Runtime::Current()->SetHiddenApiExemptions(exemptions);
 }
 
 static jobject NewInstance(JNIEnv* env, jclass klass) {
@@ -284,15 +313,7 @@
 }
 
 extern "C" JNIEXPORT jint JNICALL Java_Reflection_getHiddenApiAccessFlags(JNIEnv*, jclass) {
-  return static_cast<jint>(kAccHiddenApiBits);
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_ChildClass_hasPendingWarning(JNIEnv*, jclass) {
-  return Runtime::Current()->HasPendingHiddenApiWarning();
-}
-
-extern "C" JNIEXPORT void JNICALL Java_ChildClass_clearWarning(JNIEnv*, jclass) {
-  Runtime::Current()->SetPendingHiddenApiWarning(false);
+  return static_cast<jint>(kAccHiddenapiBits);
 }
 
 }  // namespace Test674HiddenApi
diff --git a/test/674-hiddenapi/run b/test/674-hiddenapi/run
new file mode 100755
index 0000000..2babeef
--- /dev/null
+++ b/test/674-hiddenapi/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make verification soft fail so that we can re-verify boot classpath
+# methods at runtime.
+exec ${RUN} $@ --verify-soft-fail
\ No newline at end of file
diff --git a/test/674-hiddenapi/src-art/Main.java b/test/674-hiddenapi/src-art/Main.java
index 782748c..d6a8c6d 100644
--- a/test/674-hiddenapi/src-art/Main.java
+++ b/test/674-hiddenapi/src-art/Main.java
@@ -28,6 +28,13 @@
 import java.util.zip.ZipFile;
 
 public class Main {
+  // This needs to be kept in sync with DexDomain in ChildClass.
+  enum DexDomain {
+    CorePlatform,
+    Platform,
+    Application
+  }
+
   public static void main(String[] args) throws Exception {
     System.loadLibrary(args[0]);
     prepareNativeLibFileName(args[0]);
@@ -40,72 +47,87 @@
     // or test the wrong thing. We rely on not deduping hidden API warnings
     // here for the same reasons), meaning the code under test and production
     // code are running in different configurations. Each test should be run in
-    // a fresh process to ensure that they are working correcting and not
-    // accidentally interfering with eachother.
+    // a fresh process to ensure that they are working correctly and not
+    // accidentally interfering with each other.
+    // As a side effect, we also cannot test Platform->Platform and later
+    // Platform->CorePlatform as the former succeeds in verifying linkage usages
+    // that should fail in the latter.
 
     // Run test with both parent and child dex files loaded with class loaders.
     // The expectation is that hidden members in parent should be visible to
     // the child.
-    doTest(false, false, false);
+    doTest(DexDomain.Application, DexDomain.Application, false);
     doUnloading();
 
     // Now append parent dex file to boot class path and run again. This time
     // the child dex file should not be able to access private APIs of the
     // parent.
-    appendToBootClassLoader(DEX_PARENT_BOOT);
-    doTest(true, false, false);
+    int parentIdx = appendToBootClassLoader(DEX_PARENT_BOOT, /* isCorePlatform */ false);
+    doTest(DexDomain.Platform, DexDomain.Application, false);
     doUnloading();
 
     // Now run the same test again, but with the blacklist exmemptions list set
     // to "L" which matches everything.
-    doTest(true, false, true);
+    doTest(DexDomain.Platform, DexDomain.Application, true);
     doUnloading();
 
-    // And finally append to child to boot class path as well. With both in the
-    // boot class path, access should be granted.
-    appendToBootClassLoader(DEX_CHILD);
-    doTest(true, true, false);
+    // Repeat the two tests above, only with parent being a core-platform dex file.
+    setDexDomain(parentIdx, /* isCorePlatform */ true);
+    doTest(DexDomain.CorePlatform, DexDomain.Application, false);
+    doUnloading();
+    doTest(DexDomain.CorePlatform, DexDomain.Application, true);
+    doUnloading();
+
+    // Append child to boot class path, first as a platform dex file.
+    // It should not be allowed to access non-public, non-core platform API members.
+    int childIdx = appendToBootClassLoader(DEX_CHILD, /* isCorePlatform */ false);
+    doTest(DexDomain.CorePlatform, DexDomain.Platform, false);
+    doUnloading();
+
+    // And finally change child to core-platform dex. With both in the boot classpath
+    // and both core-platform, access should be granted.
+    setDexDomain(childIdx, /* isCorePlatform */ true);
+    doTest(DexDomain.CorePlatform, DexDomain.CorePlatform, false);
     doUnloading();
   }
 
-  private static void doTest(boolean parentInBoot, boolean childInBoot, boolean whitelistAllApis)
-      throws Exception {
+  private static void doTest(DexDomain parentDomain, DexDomain childDomain,
+      boolean whitelistAllApis) throws Exception {
     // Load parent dex if it is not in boot class path.
     ClassLoader parentLoader = null;
-    if (parentInBoot) {
-      parentLoader = BOOT_CLASS_LOADER;
-    } else {
+    if (parentDomain == DexDomain.Application) {
       parentLoader = new PathClassLoader(DEX_PARENT, ClassLoader.getSystemClassLoader());
+    } else {
+      parentLoader = BOOT_CLASS_LOADER;
     }
 
     // Load child dex if it is not in boot class path.
     ClassLoader childLoader = null;
-    if (childInBoot) {
+    if (childDomain == DexDomain.Application) {
+      childLoader = new InMemoryDexClassLoader(readDexFile(DEX_CHILD), parentLoader);
+    } else {
       if (parentLoader != BOOT_CLASS_LOADER) {
         throw new IllegalStateException(
             "DeclaringClass must be in parent class loader of CallingClass");
       }
       childLoader = BOOT_CLASS_LOADER;
-    } else {
-      childLoader = new InMemoryDexClassLoader(readDexFile(DEX_CHILD), parentLoader);
     }
 
     // Create a unique copy of the native library. Each shared library can only
     // be loaded once, but for some reason even classes from a class loader
     // cannot register their native methods against symbols in a shared library
     // loaded by their parent class loader.
-    String nativeLibCopy = createNativeLibCopy(parentInBoot, childInBoot, whitelistAllApis);
+    String nativeLibCopy = createNativeLibCopy(parentDomain, childDomain, whitelistAllApis);
 
-    if (whitelistAllApis) {
-      VMRuntime.getRuntime().setHiddenApiExemptions(new String[]{"L"});
-    }
+    // Set exemptions to "L" (matches all classes) if we are testing whitelisting.
+    setWhitelistAll(whitelistAllApis);
 
     // Invoke ChildClass.runTest
-    Class.forName("ChildClass", true, childLoader)
-        .getDeclaredMethod("runTest", String.class, Boolean.TYPE, Boolean.TYPE, Boolean.TYPE)
-            .invoke(null, nativeLibCopy, parentInBoot, childInBoot, whitelistAllApis);
-
-    VMRuntime.getRuntime().setHiddenApiExemptions(new String[0]);
+    Class<?> childClass = Class.forName("ChildClass", true, childLoader);
+    Method runTestMethod = childClass.getDeclaredMethod(
+        "runTest", String.class, Integer.TYPE, Integer.TYPE, Boolean.TYPE);
+    runTestMethod.invoke(null, nativeLibCopy, parentDomain.ordinal(), childDomain.ordinal(),
+        whitelistAllApis);
   }
 
   // Routine which tries to figure out the absolute path of our native library.
@@ -146,11 +168,11 @@
 
   // Copy native library to a new file with a unique name so it does not
   // conflict with other loaded instance of the same binary file.
-  private static String createNativeLibCopy(
-      boolean parentInBoot, boolean childInBoot, boolean whitelistAllApis) throws Exception {
+  private static String createNativeLibCopy(DexDomain parentDomain, DexDomain childDomain,
+      boolean whitelistAllApis) throws Exception {
     String tempFileName = System.mapLibraryName(
-        "hiddenapitest_" + (parentInBoot ? "1" : "0") + (childInBoot ? "1" : "0") +
-         (whitelistAllApis ? "1" : "0"));
+        "hiddenapitest_" + (parentDomain.ordinal()) + (childDomain.ordinal()) +
+        (whitelistAllApis ? "1" : "0"));
     File tempFile = new File(System.getenv("DEX_LOCATION"), tempFileName);
     Files.copy(new File(nativeLibFileName).toPath(), tempFile.toPath());
     return tempFile.getAbsolutePath();
@@ -175,6 +197,8 @@
 
   private static ClassLoader BOOT_CLASS_LOADER = Object.class.getClassLoader();
 
-  private static native void appendToBootClassLoader(String dexPath);
+  private static native int appendToBootClassLoader(String dexPath, boolean isCorePlatform);
+  private static native void setDexDomain(int index, boolean isCorePlatform);
   private static native void init();
+  private static native void setWhitelistAll(boolean value);
 }
diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java
index db3ba6d..f120bda 100644
--- a/test/674-hiddenapi/src-ex/ChildClass.java
+++ b/test/674-hiddenapi/src-ex/ChildClass.java
@@ -45,7 +45,8 @@
     Whitelist(PrimitiveType.TShort),
     LightGreylist(PrimitiveType.TBoolean),
     DarkGreylist(PrimitiveType.TByte),
-    Blacklist(PrimitiveType.TCharacter);
+    Blacklist(PrimitiveType.TCharacter),
+    BlacklistAndCorePlatformApi(PrimitiveType.TInteger);
 
     Hiddenness(PrimitiveType type) { mAssociatedType = type; }
     public PrimitiveType mAssociatedType;
@@ -67,19 +68,34 @@
     Denied,
   }
 
+  // This needs to be kept in sync with DexDomain in Main.
+  enum DexDomain {
+    CorePlatform,
+    Platform,
+    Application
+  }
+
   private static final boolean booleanValues[] = new boolean[] { false, true };
 
-  public static void runTest(String libFileName, boolean expectedParentInBoot,
-      boolean expectedChildInBoot, boolean everythingWhitelisted) throws Exception {
+  public static void runTest(String libFileName, int parentDomainOrdinal,
+      int childDomainOrdinal, boolean everythingWhitelisted) throws Exception {
     System.load(libFileName);
 
+    parentDomain = DexDomain.values()[parentDomainOrdinal];
+    childDomain = DexDomain.values()[childDomainOrdinal];
+
+    configMessage = "parentDomain=" + parentDomain.name() + ", childDomain=" + childDomain.name()
+        + ", everythingWhitelisted=" + everythingWhitelisted;
+
     // Check expectations about loading into boot class path.
-    isParentInBoot = (ParentClass.class.getClassLoader().getParent() == null);
+    boolean isParentInBoot = (ParentClass.class.getClassLoader().getParent() == null);
+    boolean expectedParentInBoot = (parentDomain != DexDomain.Application);
     if (isParentInBoot != expectedParentInBoot) {
       throw new RuntimeException("Expected ParentClass " +
                                  (expectedParentInBoot ? "" : "not ") + "in boot class path");
     }
-    isChildInBoot = (ChildClass.class.getClassLoader().getParent() == null);
+    boolean isChildInBoot = (ChildClass.class.getClassLoader().getParent() == null);
+    boolean expectedChildInBoot = (childDomain != DexDomain.Application);
     if (isChildInBoot != expectedChildInBoot) {
       throw new RuntimeException("Expected ChildClass " + (expectedChildInBoot ? "" : "not ") +
                                  "in boot class path");
@@ -92,16 +108,26 @@
     // Run meaningful combinations of access flags.
     for (Hiddenness hiddenness : Hiddenness.values()) {
       final Behaviour expected;
+      final boolean invokesMemberCallback;
       // Warnings are now disabled whenever access is granted, even for
       // greylisted APIs. This is the behaviour for release builds.
-      if (isSameBoot || everythingWhitelisted || hiddenness == Hiddenness.Whitelist) {
+      if (everythingWhitelisted || hiddenness == Hiddenness.Whitelist) {
         expected = Behaviour.Granted;
-      } else if (hiddenness == Hiddenness.Blacklist) {
+        invokesMemberCallback = false;
+      } else if (parentDomain == DexDomain.CorePlatform && childDomain == DexDomain.Platform) {
+        expected = (hiddenness == Hiddenness.BlacklistAndCorePlatformApi)
+            ? Behaviour.Granted : Behaviour.Denied;
+        invokesMemberCallback = false;
+      } else if (isSameBoot) {
+        expected = Behaviour.Granted;
+        invokesMemberCallback = false;
+      } else if (hiddenness == Hiddenness.Blacklist ||
+                 hiddenness == Hiddenness.BlacklistAndCorePlatformApi) {
         expected = Behaviour.Denied;
-      } else if (isDebuggable) {
-        expected = Behaviour.Warning;
+        invokesMemberCallback = true;
       } else {
-        expected = Behaviour.Granted;
+        expected = Behaviour.Warning;
+        invokesMemberCallback = true;
       }
 
       for (boolean isStatic : booleanValues) {
@@ -111,8 +137,10 @@
           // Test reflection and JNI on methods and fields
           for (Class klass : new Class<?>[] { ParentClass.class, ParentInterface.class }) {
             String baseName = visibility.name() + suffix;
-            checkField(klass, "field" + baseName, isStatic, visibility, expected);
-            checkMethod(klass, "method" + baseName, isStatic, visibility, expected);
+            checkField(klass, "field" + baseName, isStatic, visibility, expected,
+                invokesMemberCallback);
+            checkMethod(klass, "method" + baseName, isStatic, visibility, expected,
+                invokesMemberCallback);
           }
 
           // Check whether one can use a class constructor.
@@ -120,7 +148,8 @@
 
           // Check whether one can use an interface default method.
           String name = "method" + visibility.name() + "Default" + hiddenness.name();
-          checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected);
+          checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected,
+              invokesMemberCallback);
         }
 
         // Test whether static linking succeeds.
@@ -145,7 +174,7 @@
   }
 
   private static void checkMemberCallback(Class<?> klass, String name,
-          boolean isPublic, boolean isField) {
+          boolean isPublic, boolean isField, boolean expectedCallback) {
       try {
           RecordingConsumer consumer = new RecordingConsumer();
           VMRuntime.setNonSdkApiUsageConsumer(consumer);
@@ -168,8 +197,14 @@
               // only interested in whether the callback is invoked.
           }
 
-          if (consumer.recordedValue == null || !consumer.recordedValue.contains(name)) {
-              throw new RuntimeException("No callback for member: " + name);
+          boolean actualCallback = consumer.recordedValue != null &&
+                          consumer.recordedValue.contains(name);
+          if (expectedCallback != actualCallback) {
+              if (expectedCallback) {
+                throw new RuntimeException("Expected callback for member: " + name);
+              } else {
+                throw new RuntimeException("Did not expect callback for member: " + name);
+              }
           }
       } finally {
           VMRuntime.setNonSdkApiUsageConsumer(null);
@@ -177,11 +212,10 @@
   }
 
   private static void checkField(Class<?> klass, String name, boolean isStatic,
-      Visibility visibility, Behaviour behaviour) throws Exception {
+      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback) throws Exception {
 
     boolean isPublic = (visibility == Visibility.Public);
     boolean canDiscover = (behaviour != Behaviour.Denied);
-    boolean setsWarning = (behaviour == Behaviour.Warning);
 
     if (klass.isInterface() && (!isStatic || !isPublic)) {
       // Interfaces only have public static fields.
@@ -243,8 +277,6 @@
                               canDiscover);
     }
 
-    // Finish here if we could not discover the field.
-
     if (canDiscover) {
       // Test that modifiers are unaffected.
 
@@ -254,48 +286,26 @@
 
       // Test getters and setters when meaningful.
 
-      clearWarning();
       if (!Reflection.canGetField(klass, name)) {
         throwAccessException(klass, name, true, "Field.getInt()");
       }
-      if (hasPendingWarning() != setsWarning) {
-        throwWarningException(klass, name, true, "Field.getInt()", setsWarning);
-      }
-
-      clearWarning();
       if (!Reflection.canSetField(klass, name)) {
         throwAccessException(klass, name, true, "Field.setInt()");
       }
-      if (hasPendingWarning() != setsWarning) {
-        throwWarningException(klass, name, true, "Field.setInt()", setsWarning);
-      }
-
-      clearWarning();
       if (!JNI.canGetField(klass, name, isStatic)) {
         throwAccessException(klass, name, true, "getIntField");
       }
-      if (hasPendingWarning() != setsWarning) {
-        throwWarningException(klass, name, true, "getIntField", setsWarning);
-      }
-
-      clearWarning();
       if (!JNI.canSetField(klass, name, isStatic)) {
         throwAccessException(klass, name, true, "setIntField");
       }
-      if (hasPendingWarning() != setsWarning) {
-        throwWarningException(klass, name, true, "setIntField", setsWarning);
-      }
     }
 
     // Test that callbacks are invoked correctly.
-    clearWarning();
-    if (setsWarning || !canDiscover) {
-      checkMemberCallback(klass, name, isPublic, true /* isField */);
-    }
+    checkMemberCallback(klass, name, isPublic, true /* isField */, invokesMemberCallback);
   }
 
   private static void checkMethod(Class<?> klass, String name, boolean isStatic,
-      Visibility visibility, Behaviour behaviour) throws Exception {
+      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback) throws Exception {
 
     boolean isPublic = (visibility == Visibility.Public);
     if (klass.isInterface() && !isPublic) {
@@ -304,7 +314,6 @@
     }
 
     boolean canDiscover = (behaviour != Behaviour.Denied);
-    boolean setsWarning = (behaviour == Behaviour.Warning);
 
     // Test discovery with reflection.
 
@@ -354,39 +363,21 @@
       }
 
       // Test whether we can invoke the method. This skips non-static interface methods.
-
       if (!klass.isInterface() || isStatic) {
-        clearWarning();
         if (!Reflection.canInvokeMethod(klass, name)) {
           throwAccessException(klass, name, false, "invoke()");
         }
-        if (hasPendingWarning() != setsWarning) {
-          throwWarningException(klass, name, false, "invoke()", setsWarning);
-        }
-
-        clearWarning();
         if (!JNI.canInvokeMethodA(klass, name, isStatic)) {
           throwAccessException(klass, name, false, "CallMethodA");
         }
-        if (hasPendingWarning() != setsWarning) {
-          throwWarningException(klass, name, false, "CallMethodA()", setsWarning);
-        }
-
-        clearWarning();
         if (!JNI.canInvokeMethodV(klass, name, isStatic)) {
           throwAccessException(klass, name, false, "CallMethodV");
         }
-        if (hasPendingWarning() != setsWarning) {
-          throwWarningException(klass, name, false, "CallMethodV()", setsWarning);
-        }
       }
     }
 
     // Test that callbacks are invoked correctly.
-    clearWarning();
-    if (setsWarning || !canDiscover) {
-        checkMemberCallback(klass, name, isPublic, false /* isField */);
-    }
+    checkMemberCallback(klass, name, isPublic, false /* isField */, invokesMemberCallback);
   }
 
   private static void checkConstructor(Class<?> klass, Visibility visibility, Hiddenness hiddenness,
@@ -403,7 +394,6 @@
     MethodType methodType = MethodType.methodType(void.class, args);
 
     boolean canDiscover = (behaviour != Behaviour.Denied);
-    boolean setsWarning = (behaviour == Behaviour.Warning);
 
     // Test discovery with reflection.
 
@@ -446,69 +436,38 @@
                               canDiscover);
     }
 
-    // Finish here if we could not discover the constructor.
+    if (canDiscover) {
+      // Test whether we can invoke the constructor.
 
-    if (!canDiscover) {
-      return;
-    }
-
-    // Test whether we can invoke the constructor.
-
-    clearWarning();
-    if (!Reflection.canInvokeConstructor(klass, args, initargs)) {
-      throwAccessException(klass, fullName, false, "invoke()");
-    }
-    if (hasPendingWarning() != setsWarning) {
-      throwWarningException(klass, fullName, false, "invoke()", setsWarning);
-    }
-
-    clearWarning();
-    if (!JNI.canInvokeConstructorA(klass, signature)) {
-      throwAccessException(klass, fullName, false, "NewObjectA");
-    }
-    if (hasPendingWarning() != setsWarning) {
-      throwWarningException(klass, fullName, false, "NewObjectA", setsWarning);
-    }
-
-    clearWarning();
-    if (!JNI.canInvokeConstructorV(klass, signature)) {
-      throwAccessException(klass, fullName, false, "NewObjectV");
-    }
-    if (hasPendingWarning() != setsWarning) {
-      throwWarningException(klass, fullName, false, "NewObjectV", setsWarning);
+      if (!Reflection.canInvokeConstructor(klass, args, initargs)) {
+        throwAccessException(klass, fullName, false, "invoke()");
+      }
+      if (!JNI.canInvokeConstructorA(klass, signature)) {
+        throwAccessException(klass, fullName, false, "NewObjectA");
+      }
+      if (!JNI.canInvokeConstructorV(klass, signature)) {
+        throwAccessException(klass, fullName, false, "NewObjectV");
+      }
     }
   }
 
   private static void checkNullaryConstructor(Class<?> klass, Behaviour behaviour)
       throws Exception {
     boolean canAccess = (behaviour != Behaviour.Denied);
-    boolean setsWarning = (behaviour == Behaviour.Warning);
 
-    clearWarning();
     if (Reflection.canUseNewInstance(klass) != canAccess) {
       throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") +
-          "be able to construct " + klass.getName() + ". " +
-          "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
-    }
-    if (canAccess && hasPendingWarning() != setsWarning) {
-      throwWarningException(klass, "nullary constructor", false, "newInstance", setsWarning);
+          "be able to construct " + klass.getName() + ". " + configMessage);
     }
   }
 
   private static void checkLinking(String className, boolean takesParameter, Behaviour behaviour)
       throws Exception {
     boolean canAccess = (behaviour != Behaviour.Denied);
-    boolean setsWarning = (behaviour == Behaviour.Warning);
 
-    clearWarning();
     if (Linking.canAccess(className, takesParameter) != canAccess) {
       throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") +
-          "be able to verify " + className + "." +
-          "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
-    }
-    if (canAccess && hasPendingWarning() != setsWarning) {
-      throwWarningException(
-          Class.forName(className), "access", false, "static linking", setsWarning);
+          "be able to verify " + className + "." + configMessage);
     }
   }
 
@@ -516,25 +475,13 @@
       String fn, boolean canAccess) {
     throw new RuntimeException("Expected " + (isField ? "field " : "method ") + klass.getName() +
         "." + name + " to " + (canAccess ? "" : "not ") + "be discoverable with " + fn + ". " +
-        "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot + ", " +
-        "everythingWhitelisted = " + everythingWhitelisted);
+        configMessage);
   }
 
   private static void throwAccessException(Class<?> klass, String name, boolean isField,
       String fn) {
     throw new RuntimeException("Expected to be able to access " + (isField ? "field " : "method ") +
-        klass.getName() + "." + name + " using " + fn + ". " +
-        "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot + ", " +
-        "everythingWhitelisted = " + everythingWhitelisted);
-  }
-
-  private static void throwWarningException(Class<?> klass, String name, boolean isField,
-      String fn, boolean setsWarning) {
-    throw new RuntimeException("Expected access to " + (isField ? "field " : "method ") +
-        klass.getName() + "." + name + " using " + fn + " to " + (setsWarning ? "" : "not ") +
-        "set the warning flag. " +
-        "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot + ", " +
-        "everythingWhitelisted = " + everythingWhitelisted);
+        klass.getName() + "." + name + " using " + fn + ". " + configMessage);
   }
 
   private static void throwModifiersException(Class<?> klass, String name, boolean isField) {
@@ -542,10 +489,9 @@
         "." + name + " to not expose hidden modifiers");
   }
 
-  private static boolean isParentInBoot;
-  private static boolean isChildInBoot;
+  private static DexDomain parentDomain;
+  private static DexDomain childDomain;
   private static boolean everythingWhitelisted;
 
-  private static native boolean hasPendingWarning();
-  private static native void clearWarning();
+  private static String configMessage;
 }
diff --git a/test/674-hiddenapi/src-ex/Linking.java b/test/674-hiddenapi/src-ex/Linking.java
index 0fa0b19..5aa3663 100644
--- a/test/674-hiddenapi/src-ex/Linking.java
+++ b/test/674-hiddenapi/src-ex/Linking.java
@@ -62,6 +62,12 @@
   }
 }
 
+class LinkFieldGetBlacklistAndCorePlatformApi {
+  public static int access() {
+    return new ParentClass().fieldPublicBlacklistAndCorePlatformApi;
+  }
+}
+
 // INSTANCE FIELD SET
 
 class LinkFieldSetWhitelist {
@@ -92,6 +98,13 @@
   }
 }
 
+class LinkFieldSetBlacklistAndCorePlatformApi {
+  public static void access(int x) {
+    // Need to use a different field from the getter to bypass DexCache.
+    new ParentClass().fieldPublicBlacklistAndCorePlatformApiB = x;
+  }
+}
+
 // STATIC FIELD GET
 
 class LinkFieldGetStaticWhitelist {
@@ -118,6 +131,12 @@
   }
 }
 
+class LinkFieldGetStaticBlacklistAndCorePlatformApi {
+  public static int access() {
+    return ParentClass.fieldPublicStaticBlacklistAndCorePlatformApi;
+  }
+}
+
 // STATIC FIELD SET
 
 class LinkFieldSetStaticWhitelist {
@@ -148,6 +167,13 @@
   }
 }
 
+class LinkFieldSetStaticBlacklistAndCorePlatformApi {
+  public static void access(int x) {
+    // Need to use a different field from the getter to bypass DexCache.
+    ParentClass.fieldPublicStaticBlacklistAndCorePlatformApiB = x;
+  }
+}
+
 // INVOKE INSTANCE METHOD
 
 class LinkMethodWhitelist {
@@ -174,6 +200,12 @@
   }
 }
 
+class LinkMethodBlacklistAndCorePlatformApi {
+  public static int access() {
+    return new ParentClass().methodPublicBlacklistAndCorePlatformApi();
+  }
+}
+
 // INVOKE INSTANCE INTERFACE METHOD
 
 class LinkMethodInterfaceWhitelist {
@@ -200,6 +232,12 @@
   }
 }
 
+class LinkMethodInterfaceBlacklistAndCorePlatformApi {
+  public static int access() {
+    return DummyClass.getInterfaceInstance().methodPublicBlacklistAndCorePlatformApi();
+  }
+}
+
 // INVOKE STATIC METHOD
 
 class LinkMethodStaticWhitelist {
@@ -226,6 +264,12 @@
   }
 }
 
+class LinkMethodStaticBlacklistAndCorePlatformApi {
+  public static int access() {
+    return ParentClass.methodPublicStaticBlacklistAndCorePlatformApi();
+  }
+}
+
 // INVOKE INTERFACE STATIC METHOD
 
 class LinkMethodInterfaceStaticWhitelist {
@@ -251,3 +295,9 @@
     return ParentInterface.methodPublicStaticBlacklist();
   }
 }
+
+class LinkMethodInterfaceStaticBlacklistAndCorePlatformApi {
+  public static int access() {
+    return ParentInterface.methodPublicStaticBlacklistAndCorePlatformApi();
+  }
+}
diff --git a/test/674-hiddenapi/src/DummyClass.java b/test/674-hiddenapi/src/DummyClass.java
index 51281a2..afba747 100644
--- a/test/674-hiddenapi/src/DummyClass.java
+++ b/test/674-hiddenapi/src/DummyClass.java
@@ -19,6 +19,7 @@
   public int methodPublicLightGreylist() { return 2; }
   public int methodPublicDarkGreylist() { return 3; }
   public int methodPublicBlacklist() { return 4; }
+  public int methodPublicBlacklistAndCorePlatformApi() { return 5; }
 
   public static ParentInterface getInterfaceInstance() {
     return new DummyClass();
diff --git a/test/674-hiddenapi/src/NullaryConstructorBlacklistAndCorePlatformApi.java b/test/674-hiddenapi/src/NullaryConstructorBlacklistAndCorePlatformApi.java
new file mode 100644
index 0000000..86af29e
--- /dev/null
+++ b/test/674-hiddenapi/src/NullaryConstructorBlacklistAndCorePlatformApi.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class NullaryConstructorBlacklistAndCorePlatformApi {
+  public NullaryConstructorBlacklistAndCorePlatformApi() { x = 22; }
+  public NullaryConstructorBlacklistAndCorePlatformApi(int y) { x = y; }
+  protected int x;
+}
diff --git a/test/674-hiddenapi/src/ParentClass.java b/test/674-hiddenapi/src/ParentClass.java
index 07e84cc..1442392 100644
--- a/test/674-hiddenapi/src/ParentClass.java
+++ b/test/674-hiddenapi/src/ParentClass.java
@@ -43,6 +43,12 @@
   private int fieldPrivateBlacklist = 244;
   public int fieldPublicBlacklistB = 245;
 
+  public int fieldPublicBlacklistAndCorePlatformApi = 251;
+  int fieldPackageBlacklistAndCorePlatformApi = 252;
+  protected int fieldProtectedBlacklistAndCorePlatformApi = 253;
+  private int fieldPrivateBlacklistAndCorePlatformApi = 254;
+  public int fieldPublicBlacklistAndCorePlatformApiB = 255;
+
   // STATIC FIELD
 
   public static int fieldPublicStaticWhitelist = 111;
@@ -69,6 +75,12 @@
   private static int fieldPrivateStaticBlacklist = 144;
   public static int fieldPublicStaticBlacklistB = 145;
 
+  public static int fieldPublicStaticBlacklistAndCorePlatformApi = 151;
+  static int fieldPackageStaticBlacklistAndCorePlatformApi = 152;
+  protected static int fieldProtectedStaticBlacklistAndCorePlatformApi = 153;
+  private static int fieldPrivateStaticBlacklistAndCorePlatformApi = 154;
+  public static int fieldPublicStaticBlacklistAndCorePlatformApiB = 155;
+
   // INSTANCE METHOD
 
   public int methodPublicWhitelist() { return 411; }
@@ -91,6 +103,11 @@
   protected int methodProtectedBlacklist() { return 443; }
   private int methodPrivateBlacklist() { return 444; }
 
+  public int methodPublicBlacklistAndCorePlatformApi() { return 451; }
+  int methodPackageBlacklistAndCorePlatformApi() { return 452; }
+  protected int methodProtectedBlacklistAndCorePlatformApi() { return 453; }
+  private int methodPrivateBlacklistAndCorePlatformApi() { return 454; }
+
   // STATIC METHOD
 
   public static int methodPublicStaticWhitelist() { return 311; }
@@ -113,6 +130,11 @@
   protected static int methodProtectedStaticBlacklist() { return 343; }
   private static int methodPrivateStaticBlacklist() { return 344; }
 
+  public static int methodPublicStaticBlacklistAndCorePlatformApi() { return 351; }
+  static int methodPackageStaticBlacklistAndCorePlatformApi() { return 352; }
+  protected static int methodProtectedStaticBlacklistAndCorePlatformApi() { return 353; }
+  private static int methodPrivateStaticBlacklistAndCorePlatformApi() { return 354; }
+
   // CONSTRUCTOR
 
   // Whitelist
@@ -139,6 +161,12 @@
   protected ParentClass(long x, char y) {}
   private ParentClass(double x, char y) {}
 
+  // Blacklist and CorePlatformApi
+  public ParentClass(int x, int y) {}
+  ParentClass(float x, int y) {}
+  protected ParentClass(long x, int y) {}
+  private ParentClass(double x, int y) {}
+
   // HELPERS
 
   public int callMethodPublicWhitelist() { return methodPublicWhitelist(); }
@@ -157,4 +185,15 @@
   public int callMethodPackageBlacklist() { return methodPackageBlacklist(); }
   public int callMethodProtectedBlacklist() { return methodProtectedBlacklist(); }
 
+  public int callMethodPublicBlacklistAndCorePlatformApi() {
+    return methodPublicBlacklistAndCorePlatformApi();
+  }
+
+  public int callMethodPackageBlacklistAndCorePlatformApi() {
+    return methodPackageBlacklistAndCorePlatformApi();
+  }
+
+  public int callMethodProtectedBlacklistAndCorePlatformApi() {
+    return methodProtectedBlacklistAndCorePlatformApi();
+  }
 }
diff --git a/test/674-hiddenapi/src/ParentInterface.java b/test/674-hiddenapi/src/ParentInterface.java
index f79ac9d..1c5b58f 100644
--- a/test/674-hiddenapi/src/ParentInterface.java
+++ b/test/674-hiddenapi/src/ParentInterface.java
@@ -20,22 +20,26 @@
   static int fieldPublicStaticLightGreylist = 12;
   static int fieldPublicStaticDarkGreylist = 13;
   static int fieldPublicStaticBlacklist = 14;
+  static int fieldPublicStaticBlacklistAndCorePlatformApi = 15;
 
   // INSTANCE METHOD
   int methodPublicWhitelist();
   int methodPublicLightGreylist();
   int methodPublicDarkGreylist();
   int methodPublicBlacklist();
+  int methodPublicBlacklistAndCorePlatformApi();
 
   // STATIC METHOD
   static int methodPublicStaticWhitelist() { return 21; }
   static int methodPublicStaticLightGreylist() { return 22; }
   static int methodPublicStaticDarkGreylist() { return 23; }
   static int methodPublicStaticBlacklist() { return 24; }
+  static int methodPublicStaticBlacklistAndCorePlatformApi() { return 25; }
 
   // DEFAULT METHOD
   default int methodPublicDefaultWhitelist() { return 31; }
   default int methodPublicDefaultLightGreylist() { return 32; }
   default int methodPublicDefaultDarkGreylist() { return 33; }
   default int methodPublicDefaultBlacklist() { return 34; }
+  default int methodPublicDefaultBlacklistAndCorePlatformApi() { return 35; }
 }
diff --git a/test/684-checker-simd-dotprod/expected.txt b/test/684-checker-simd-dotprod/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/684-checker-simd-dotprod/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/684-checker-simd-dotprod/info.txt b/test/684-checker-simd-dotprod/info.txt
new file mode 100644
index 0000000..6c1efb6
--- /dev/null
+++ b/test/684-checker-simd-dotprod/info.txt
@@ -0,0 +1 @@
+Functional tests on dot product idiom SIMD vectorization.
diff --git a/test/684-checker-simd-dotprod/src/Main.java b/test/684-checker-simd-dotprod/src/Main.java
new file mode 100644
index 0000000..e0c8716
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import other.TestByte;
+import other.TestCharShort;
+import other.TestVarious;
+
+/**
+ * Tests for dot product idiom vectorization.
+ */
+public class Main {
+  public static void main(String[] args) {
+     TestByte.run();
+     TestCharShort.run();
+     TestVarious.run();
+     System.out.println("passed");
+  }
+}
diff --git a/test/684-checker-simd-dotprod/src/other/TestByte.java b/test/684-checker-simd-dotprod/src/other/TestByte.java
new file mode 100644
index 0000000..9acfc59
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestByte.java
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization: byte case.
+ */
+public class TestByte {
+
+  public static final int ARRAY_SIZE = 1024;
+
+  /// CHECK-START: int other.TestByte.testDotProdSimple(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Get2>>]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSimple(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdSimple(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdComplex(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC1:i\d+>>   Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:b\d+>>  TypeConversion [<<AddC1>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC2:i\d+>>   Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:b\d+>>  TypeConversion [<<AddC2>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplex(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplex(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((byte)(a[i] + 1)) * ((byte)(b[i] + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsigned(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Get2>>]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSimpleUnsigned(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdSimpleUnsigned(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (a[i] & 0xff) * (b[i] & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdComplexUnsigned(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:a\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:a\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexUnsigned(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexUnsigned(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (((a[i] & 0xff) + 1) & 0xff) * (((b[i] & 0xff) + 1) & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdComplexUnsignedCastedToSigned(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:b\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:b\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexUnsignedCastedToSigned(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexUnsignedCastedToSigned(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((byte)((a[i] & 0xff) + 1)) * ((byte)((b[i] & 0xff) + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdComplexSignedCastedToUnsigned(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:a\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:a\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexSignedCastedToUnsigned(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexSignedCastedToUnsigned(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((a[i] + 1) & 0xff) * ((b[i] + 1) & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSignedWidening(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Int8
+  public static final int testDotProdSignedWidening(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((short)(a[i])) * ((short)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdParamSigned(int, byte[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Int8
+  public static final int testDotProdParamSigned(int x, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (byte)(x) * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestByte.testDotProdParamUnsigned(int, byte[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Uint8
+  public static final int testDotProdParamUnsigned(int x, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (x & 0xff) * (b[i] & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  // No DOTPROD cases.
+
+  /// CHECK-START: int other.TestByte.testDotProdIntParam(int, byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdIntParam(int x, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = b[i] * (x);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSignedToChar(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSignedToChar(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((char)(a[i])) * ((char)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  // Cases when result of Mul is type-converted are not supported.
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToSignedByte(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToSignedByte(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      byte temp = (byte)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToUnsignedByte(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToUnsignedByte(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      s += (a[i] * b[i]) & 0xff;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToSignedByte(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToSignedByte(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      byte temp = (byte)((a[i] & 0xff) * (b[i] & 0xff));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToUnsignedByte(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToUnsignedByte(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      s += ((a[i] & 0xff) * (b[i] & 0xff)) & 0xff;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToShort(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToShort(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToChar(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToChar(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToShort(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToShort(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)((a[i] & 0xff) * (b[i] & 0xff));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToChar(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToChar(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)((a[i] & 0xff) * (b[i] & 0xff));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToLong(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToLong(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      long temp = (long)((a[i] & 0xff) * (b[i] & 0xff));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestByte.testDotProdUnsignedSigned(byte[], byte[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdUnsignedSigned(byte[] a, byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (a[i] & 0xff) * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void testDotProd(byte[] b1, byte[] b2, int[] results) {
+    expectEquals(results[0], testDotProdSimple(b1, b2));
+    expectEquals(results[1], testDotProdComplex(b1, b2));
+    expectEquals(results[2], testDotProdSimpleUnsigned(b1, b2));
+    expectEquals(results[3], testDotProdComplexUnsigned(b1, b2));
+    expectEquals(results[4], testDotProdComplexUnsignedCastedToSigned(b1, b2));
+    expectEquals(results[5], testDotProdComplexSignedCastedToUnsigned(b1, b2));
+    expectEquals(results[6], testDotProdSignedWidening(b1, b2));
+    expectEquals(results[7], testDotProdParamSigned(-128, b2));
+    expectEquals(results[8], testDotProdParamUnsigned(-128, b2));
+    expectEquals(results[9], testDotProdIntParam(-128, b2));
+    expectEquals(results[10], testDotProdSignedToChar(b1, b2));
+    expectEquals(results[11], testDotProdSimpleCastedToSignedByte(b1, b2));
+    expectEquals(results[12], testDotProdSimpleCastedToUnsignedByte(b1, b2));
+    expectEquals(results[13], testDotProdSimpleUnsignedCastedToSignedByte(b1, b2));
+    expectEquals(results[14], testDotProdSimpleUnsignedCastedToUnsignedByte(b1, b2));
+    expectEquals(results[15], testDotProdSimpleCastedToShort(b1, b2));
+    expectEquals(results[16], testDotProdSimpleCastedToChar(b1, b2));
+    expectEquals(results[17], testDotProdSimpleUnsignedCastedToShort(b1, b2));
+    expectEquals(results[18], testDotProdSimpleUnsignedCastedToChar(b1, b2));
+    expectEquals(results[19], testDotProdSimpleUnsignedCastedToLong(b1, b2));
+    expectEquals(results[20], testDotProdUnsignedSigned(b1, b2));
+  }
+
+  public static void run() {
+    byte[] b1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+    byte[] b2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+    int[] results_1 = { 64516, 65548, 64516, 65548, 65548, 65548, 64516, -65024, 65024, -65024,
+                        64516, 4, 4, 4, 4, 64516, 64516, 64516, 64516, 64516, 64516 };
+    testDotProd(b1_1, b2_1, results_1);
+
+    byte[] b1_2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+    byte[] b2_2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+    int[] results_2 = { 80645, 81931, 80645, 81931, 81931, 81931, 80645, -81280, 81280, -81280,
+                        80645, 5, 5, 5, 5, 80645, 80645, 80645, 80645, 80645, 80645 };
+    testDotProd(b1_2, b2_2, results_2);
+
+    byte[] b1_3 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+    byte[] b2_3 = {  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  127,  127,  127,  127 };
+    int[] results_3 = { -81280, 81291, 81280, 82571, 81291, 82571, -81280, -81280, 81280, -81280,
+                        41534080, -640, 640, -640, 640, -81280, 246400, 81280, 81280, 81280, 81280 };
+    testDotProd(b1_3, b2_3, results_3);
+
+    byte[] b1_4 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+    byte[] b2_4 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+    int[] results_4 = { 81920, 80656, 81920, 83216, 80656, 83216, 81920, 81920, 81920, 81920,
+                       -83804160, 0, 0, 0, 0, 81920, 81920, 81920, 81920, 81920, -81920 };
+    testDotProd(b1_4, b2_4, results_4);
+  }
+
+  public static void main(String[] args) {
+    run();
+  }
+}
diff --git a/test/684-checker-simd-dotprod/src/other/TestCharShort.java b/test/684-checker-simd-dotprod/src/other/TestCharShort.java
new file mode 100644
index 0000000..9cb9db5
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestCharShort.java
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization: char and short case.
+ */
+public class TestCharShort {
+
+  public static final int ARRAY_SIZE = 1024;
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimple(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Get2>>]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSimple(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int16  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdSimple(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdComplex(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC1:i\d+>>   Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:s\d+>>  TypeConversion [<<AddC1>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC2:i\d+>>   Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:s\d+>>  TypeConversion [<<AddC2>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplex(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int16  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplex(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((short)(a[i] + 1)) * ((short)(b[i] + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsigned(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Get2>>]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSimpleUnsigned(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint16 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdSimpleUnsigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdComplexUnsigned(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:c\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:c\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexUnsigned(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint16 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexUnsigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((char)(a[i] + 1)) * ((char)(b[i] + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdComplexUnsignedCastedToSigned(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:s\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:s\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexUnsignedCastedToSigned(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int16  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexUnsignedCastedToSigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((short)(a[i] + 1)) * ((short)(b[i] + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdComplexSignedCastedToUnsigned(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddC:i\d+>>    Add [<<Get1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:c\d+>>  TypeConversion [<<AddC>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:c\d+>>  TypeConversion [<<AddGets>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<TypeC1>>,<<TypeC2>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexSignedCastedToUnsigned(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const1>>]                       loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd1:d\d+>>   VecAdd [<<Load1>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<VAdd2:d\d+>>   VecAdd [<<Load2>>,<<Repl>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint16 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdComplexSignedCastedToUnsigned(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((char)(a[i] + 1)) * ((char)(b[i] + 1));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSignedToInt(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Int16
+  public static final int testDotProdSignedToInt(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((int)(a[i])) * ((int)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdParamSigned(int, short[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Int16
+  public static final int testDotProdParamSigned(int x, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (short)(x) * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdParamUnsigned(int, char[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Uint16
+  public static final int testDotProdParamUnsigned(int x, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (char)(x) * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdIntParam(int, short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdIntParam(int x, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = b[i] * (x);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSignedToChar(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG:                  VecDotProd type:Uint16
+  public static final int testDotProdSignedToChar(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((char)(a[i])) * ((char)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  // Cases when result of Mul is type-converted are not supported.
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleMulCastedToSigned(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleMulCastedToSigned(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleMulCastedToUnsigned(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleMulCastedToUnsigned(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedMulCastedToSigned(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedMulCastedToSigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedMulCastedToUnsigned(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedMulCastedToUnsigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleCastedToShort(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToShort(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleCastedToChar(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleCastedToChar(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToShort(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToShort(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      short temp = (short)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToChar(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToChar(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      char temp = (char)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToLong(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSimpleUnsignedCastedToLong(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      long temp = (long)(a[i] * b[i]);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  // Narrowing conversions.
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSignedNarrowerSigned(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSignedNarrowerSigned(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((byte)(a[i])) * ((byte)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdSignedNarrowerUnsigned(short[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdSignedNarrowerUnsigned(short[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (a[i] & 0xff) * (b[i] & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdUnsignedNarrowerSigned(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdUnsignedNarrowerSigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((byte)(a[i])) * ((byte)(b[i]));
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdUnsignedNarrowerUnsigned(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdUnsignedNarrowerUnsigned(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = (a[i] & 0xff) * (b[i] & 0xff);
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  /// CHECK-START: int other.TestCharShort.testDotProdUnsignedSigned(char[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdUnsignedSigned(char[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s - 1;
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void testDotProd(short[] s1, short[] s2, char[] c1, char[] c2, int[] results) {
+    expectEquals(results[0], testDotProdSimple(s1, s2));
+    expectEquals(results[1], testDotProdComplex(s1, s2));
+    expectEquals(results[2], testDotProdSimpleUnsigned(c1, c2));
+    expectEquals(results[3], testDotProdComplexUnsigned(c1, c2));
+    expectEquals(results[4], testDotProdComplexUnsignedCastedToSigned(c1, c2));
+    expectEquals(results[5], testDotProdComplexSignedCastedToUnsigned(s1, s2));
+    expectEquals(results[6], testDotProdSignedToInt(s1, s2));
+    expectEquals(results[7], testDotProdParamSigned(-32768, s2));
+    expectEquals(results[8], testDotProdParamUnsigned(-32768, c2));
+    expectEquals(results[9], testDotProdIntParam(-32768, s2));
+    expectEquals(results[10], testDotProdSignedToChar(s1, s2));
+    expectEquals(results[11], testDotProdSimpleMulCastedToSigned(s1, s2));
+    expectEquals(results[12], testDotProdSimpleMulCastedToUnsigned(s1, s2));
+    expectEquals(results[13], testDotProdSimpleUnsignedMulCastedToSigned(c1, c2));
+    expectEquals(results[14], testDotProdSimpleUnsignedMulCastedToUnsigned(c1, c2));
+    expectEquals(results[15], testDotProdSimpleCastedToShort(s1, s2));
+    expectEquals(results[16], testDotProdSimpleCastedToChar(s1, s2));
+    expectEquals(results[17], testDotProdSimpleUnsignedCastedToShort(c1, c2));
+    expectEquals(results[18], testDotProdSimpleUnsignedCastedToChar(c1, c2));
+    expectEquals(results[19], testDotProdSimpleUnsignedCastedToLong(c1, c2));
+    expectEquals(results[20], testDotProdSignedNarrowerSigned(s1, s2));
+    expectEquals(results[21], testDotProdSignedNarrowerUnsigned(s1, s2));
+    expectEquals(results[22], testDotProdUnsignedNarrowerSigned(c1, c2));
+    expectEquals(results[23], testDotProdUnsignedNarrowerUnsigned(c1, c2));
+    expectEquals(results[24], testDotProdUnsignedSigned(c1, s2));
+  }
+
+  public static void run() {
+    final short MAX_S = Short.MAX_VALUE;
+    final short MIN_S = Short.MIN_VALUE;
+
+    short[] s1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+    short[] s2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+    char[]  c1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+    char[]  c2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+    int[] results_1 = { 2147352578, -2147483634, 2147352578, -2147483634, -2147483634, -2147483634,
+                        2147352578, -2147418112, 2147418112, -2147418112, 2147352578,
+                        2, 2, 2, 2, 2, 2, 2, 2, 2147352578, 2, 130050, 2, 130050, 2147352578 };
+    testDotProd(s1_1, s2_1, c1_1, c2_1, results_1);
+
+    short[] s1_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+    short[] s2_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+    char[]  c1_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+    char[]  c2_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+    int[] results_2 = { -262140, 12, -262140, 12, 12, 12, -262140, 131072, -131072, 131072,
+                        -262140, 4, 4, 4, 4, 4, 4, 4, 4, -262140, 4, 260100, 4, 260100, -262140 };
+    testDotProd(s1_2, s2_2, c1_2, c2_2, results_2);
+
+    short[] s1_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+    short[] s2_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+    char[]  c1_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, (char)MIN_S, (char)MIN_S };
+    char[]  c2_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  MAX_S,  MAX_S };
+    int[] results_3 = { -2147418112, 2147418126, 2147418112, -2147418098, 2147418126,
+                        -2147418098, -2147418112, -2147418112, 2147418112, -2147418112,
+                        2147418112, -65536, 65536, -65536, 65536, -65536, 65536, -65536,
+                        65536, 2147418112, 0, 0, 0, 0, 2147418112 };
+    testDotProd(s1_3, s2_3, c1_3, c2_3, results_3);
+
+
+    short[] s1_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+    short[] s2_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+    char[]  c1_4 = { (char)MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, (char)MIN_S, (char)MIN_S };
+    char[]  c2_4 = { (char)MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, (char)MIN_S, (char)MIN_S };
+    int[] results_4 = { -1073741824, -1073938416, -1073741824, -1073545200, -1073938416,
+                        -1073545200, -1073741824, -1073741824, -1073741824, -1073741824,
+                        -1073741824, 0, 0, 0, 0, 0, 0, 0, 0, -1073741824,
+                        0, 0, 0, 0, 1073741824 };
+    testDotProd(s1_4, s2_4, c1_4, c2_4, results_4);
+  }
+
+  public static void main(String[] args) {
+    run();
+  }
+}
diff --git a/test/684-checker-simd-dotprod/src/other/TestVarious.java b/test/684-checker-simd-dotprod/src/other/TestVarious.java
new file mode 100644
index 0000000..3f46098
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestVarious.java
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization.
+ */
+public class TestVarious {
+
+  // NOTE: The '/// CHECK' blocks below are assertions consumed by ART's Checker tool;
+  // their exact text and ordering are load-bearing test expectations. The method bodies
+  // are likewise shaped so the vectorizer recognizes (or deliberately rejects) the
+  // dot-product idiom — do not reformat or "simplify" either.
+
+  // b[i] * 89: a constant multiplicand on the right is still recognized as a
+  // dot-product idiom; the 'after' CHECKs expect a signed Int8 VecDotProd with
+  // the constant broadcast via VecReplicateScalar.
+  /// CHECK-START: int other.TestVarious.testDotProdConstRight(byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89                                        loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Const89>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdConstRight(byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89                                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const89>>]                      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdConstRight(byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp =  b[i] * 89;
+      s += temp;
+    }
+    return s;
+  }
+
+  // 89 * (b[i] & 0xff): constant on the left with a zero-extended (unsigned) byte
+  // operand — the 'after' CHECKs expect the Uint8 flavor of VecDotProd.
+  /// CHECK-START: int other.TestVarious.testDotProdConstLeft(byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89                                        loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Const89>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdConstLeft(byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89                                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const89>>]                      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Uint8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdConstLeft(byte[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = 89 * (b[i] & 0xff);
+      s += temp;
+    }
+    return s;
+  }
+
+  // The loop-invariant (byte)(param + 129) is computed once outside the loop and
+  // broadcast into a vector via VecReplicateScalar (see 'after' CHECKs).
+  /// CHECK-START: int other.TestVarious.testDotProdLoopInvariantConvRight(byte[], int) loop_optimization (before)
+  /// CHECK-DAG: <<Param:i\d+>>   ParameterValue                                        loop:none
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<ConstL:i\d+>>  IntConstant 129                                       loop:none
+  /// CHECK-DAG: <<AddP:i\d+>>    Add [<<Param>>,<<ConstL>>]                            loop:none
+  /// CHECK-DAG: <<TypeCnv:b\d+>> TypeConversion [<<AddP>>]                             loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]                          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<TypeCnv>>]                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                             loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdLoopInvariantConvRight(byte[], int) loop_optimization (after)
+  /// CHECK-DAG: <<Param:i\d+>>   ParameterValue                                        loop:none
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<ConstL:i\d+>>  IntConstant 129                                       loop:none
+  /// CHECK-DAG: <<AddP:i\d+>>    Add [<<Param>>,<<ConstL>>]                            loop:none
+  /// CHECK-DAG: <<TypeCnv:b\d+>> TypeConversion [<<AddP>>]                             loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<TypeCnv>>]                      loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                                  loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]                         loop:none
+  public static final int testDotProdLoopInvariantConvRight(byte[] b, int param) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = b[i] * ((byte)(param + 129));
+      s += temp;
+    }
+    return s;
+  }
+
+  // Negative test: the char->byte->char conversion chain must NOT be matched as a
+  // dot-product idiom (no VecDotProd expected).
+  /// CHECK-START: int other.TestVarious.testDotProdByteToChar(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdByteToChar(char[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = ((char)((byte)(a[i] + 129))) * b[i];
+      s += temp;
+    }
+    return s;
+  }
+
+  // Negative test: operands of different sizes (byte * short) are not vectorized
+  // as a dot product.
+  /// CHECK-START: int other.TestVarious.testDotProdMixedSize(byte[], short[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdMixedSize(byte[] a, short[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s;
+  }
+
+  // Negative test: operands differing in both size and signedness (byte * char)
+  // are not vectorized as a dot product.
+  /// CHECK-START: int other.TestVarious.testDotProdMixedSizeAndSign(byte[], char[]) loop_optimization (after)
+  /// CHECK-NOT:                  VecDotProd
+  public static final int testDotProdMixedSizeAndSign(byte[] a, char[] b) {
+    int s = 1;
+    for (int i = 0; i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s;
+  }
+
+  // 32-bit inputs: the loop is vectorized with plain VecMul/VecAdd, not VecDotProd
+  // (see 'after' CHECKs).
+  /// CHECK-START: int other.TestVarious.testDotProdInt32(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:i\d+>>     Mul [<<Get1>>,<<Get2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul>>]                    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdInt32(int[], int[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Set:d\d+>>     VecSetScalars [<<Const1>>]                loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set>>,{{d\d+}}]                    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul:d\d+>>     VecMul [<<Load1>>,<<Load2>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecAdd [<<Phi2>>,<<Mul>>]                 loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-DAG: <<Reduce:d\d+>>  VecReduce [<<Phi2>>]                      loop:none
+  /// CHECK-DAG:                  VecExtractScalar [<<Reduce>>]             loop:none
+  public static final int testDotProdInt32(int[] a, int[] b) {
+    int s = 1;
+    for (int i = 0;  i < b.length; i++) {
+      int temp = a[i] * b[i];
+      s += temp;
+    }
+    return s;
+  }
+
+  // Signed and unsigned accumulations over the SAME vector loads coexist in one
+  // loop: the 'after' CHECKs expect both an Int8 and a Uint8 VecDotProd.
+  /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsigned1(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                             loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>    Phi [<<Const2>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul1:i\d+>>    Mul [<<Get1>>,<<Get2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul1>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:a\d+>>  TypeConversion [<<Get1>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC2:a\d+>>  TypeConversion [<<Get2>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul2:i\d+>>    Mul [<<TypeC1>>,<<TypeC2>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi3>>,<<Mul2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsigned1(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Set1:d\d+>>    VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Set2:d\d+>>    VecSetScalars [<<Const2>>]                            loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set1>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:d\d+>>    Phi [<<Set2>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi3>>,<<Load1>>,<<Load2>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  public static final int testDotProdBothSignedUnsigned1(byte[] a, byte[] b) {
+    int s1 = 1;
+    int s2 = 2;
+    for (int i = 0; i < b.length; i++) {
+      byte a_val = a[i];
+      byte b_val = b[i];
+      s1 += a_val * b_val;
+      s2 += (a_val & 0xff) * (b_val & 0xff);
+    }
+    return s1 + s2;
+  }
+
+  // As above, but one accumulation multiplies by a replicated constant (42):
+  // expects a Uint8 VecDotProd on the loads plus an Int8 VecDotProd on the constant.
+  /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsigned2(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                             loop:none
+  /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42                            loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>    Phi [<<Const2>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeC1:a\d+>>  TypeConversion [<<Get1>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul1:i\d+>>    Mul [<<Get2>>,<<TypeC1>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi3>>,<<Mul1>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul2:i\d+>>    Mul [<<Get1>>,<<Const42>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsigned2(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42                                        loop:none
+  /// CHECK-DAG: <<Repl:d\d+>>    VecReplicateScalar [<<Const42>>]                      loop:none
+  /// CHECK-DAG: <<Set1:d\d+>>    VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Set2:d\d+>>    VecSetScalars [<<Const2>>]                            loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set1>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:d\d+>>    Phi [<<Set2>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi3>>,<<Load2>>,<<Load1>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  public static final int testDotProdBothSignedUnsigned2(byte[] a, byte[] b) {
+    int s1 = 1;
+    int s2 = 2;
+    for (int i = 0; i < b.length; i++) {
+      byte a_val = a[i];
+      byte b_val = b[i];
+      s2 += (a_val & 0xff) * (b_val & 0xff);
+      s1 += a_val * 42;
+    }
+    return s1 + s2;
+  }
+
+  // Here the arrays are re-read for each accumulation (no shared locals), so the
+  // vectorizer emits two separate pairs of VecLoads (see 'after' CHECKs).
+  /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsignedDoubleLoad(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                             loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>    Phi [<<Const2>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<GetB1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<GetB2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul1:i\d+>>    Mul [<<GetB1>>,<<GetB2>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul1>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<GetA1:a\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<GetA2:a\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul2:i\d+>>    Mul [<<GetA1>>,<<GetA2>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi3>>,<<Mul2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsignedDoubleLoad(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                                         loop:none
+  /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16                                        loop:none
+  /// CHECK-DAG: <<Set1:d\d+>>    VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Set2:d\d+>>    VecSetScalars [<<Const2>>]                            loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set1>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:d\d+>>    Phi [<<Set2>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load3:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load4:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi3>>,<<Load3>>,<<Load4>>] type:Uint8  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const16>>]                            loop:<<Loop>>      outer_loop:none
+  public static final int testDotProdBothSignedUnsignedDoubleLoad(byte[] a, byte[] b) {
+    int s1 = 1;
+    int s2 = 2;
+    for (int i = 0; i < b.length; i++) {
+      s1 += a[i] * b[i];
+      s2 += (a[i] & 0xff) * (b[i] & 0xff);
+    }
+    return s1 + s2;
+  }
+
+  // char inputs: the explicit (short) casts yield a signed Int16 accumulation next
+  // to the natural unsigned Uint16 one; the 8-lane step (Const8) reflects 16-bit lanes.
+  /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsignedChar(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                             loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                             loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                             loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>    Phi [<<Const1>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>    Phi [<<Const2>>,{{i\d+}}]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeS1:s\d+>>  TypeConversion [<<Get1>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<TypeS2:s\d+>>  TypeConversion [<<Get2>>]                 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul1:i\d+>>    Mul [<<TypeS1>>,<<TypeS2>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi3>>,<<Mul1>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Mul2:i\d+>>    Mul [<<Get1>>,<<Get2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi2>>,<<Mul2>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const1>>]                 loop:<<Loop>>      outer_loop:none
+
+  /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsignedChar(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Const0:i\d+>>  IntConstant 0                                         loop:none
+  /// CHECK-DAG: <<Const1:i\d+>>  IntConstant 1                                         loop:none
+  /// CHECK-DAG: <<Const2:i\d+>>  IntConstant 2                                         loop:none
+  /// CHECK-DAG: <<Const8:i\d+>>  IntConstant 8                                         loop:none
+  /// CHECK-DAG: <<Set1:d\d+>>    VecSetScalars [<<Const1>>]                            loop:none
+  /// CHECK-DAG: <<Set2:d\d+>>    VecSetScalars [<<Const2>>]                            loop:none
+  //
+  /// CHECK-DAG: <<Phi1:i\d+>>    Phi [<<Const0>>,{{i\d+}}]                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>    Phi [<<Set1>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:d\d+>>    Phi [<<Set2>>,{{d\d+}}]                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]                           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi3>>,<<Load1>>,<<Load2>>] type:Int16  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint16 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                  Add [<<Phi1>>,<<Const8>>]                             loop:<<Loop>>      outer_loop:none
+  public static final int testDotProdBothSignedUnsignedChar(char[] a, char[] b) {
+    int s1 = 1;
+    int s2 = 2;
+    for (int i = 0; i < b.length; i++) {
+      char a_val = a[i];
+      char b_val = b[i];
+      s2 += ((short)a_val) * ((short)b_val);
+      s1 += a_val * b_val;
+    }
+    return s1 + s2;
+  }
+
+  // Fails the run-test with an Error when a computed value differs from the expected one.
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void run() {
+    final short MAX_S = Short.MAX_VALUE;
+    final short MIN_S = Short.MAX_VALUE;
+    // NOTE(review): MIN_S is bound to Short.MAX_VALUE, not Short.MIN_VALUE. The
+    // expectEquals constants below (e.g. -262137 for the char test) are consistent
+    // with this value, so changing it would break the expectations — confirm the
+    // naming against upstream intent before "fixing" it.
+
+    byte[] b1 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+    byte[] b2 = {  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  127,  127,  127,  127 };
+
+    char[] c1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+    char[] c2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+
+    int[] i1 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+    int[] i2 = {  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  127,  127,  127,  127 };
+
+    short[] s1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+
+    expectEquals(56516, testDotProdConstRight(b2));
+    expectEquals(56516, testDotProdConstLeft(b2));
+    expectEquals(1271, testDotProdLoopInvariantConvRight(b2, 129));
+    expectEquals(-8519423, testDotProdByteToChar(c1, c2));
+    expectEquals(-8388351, testDotProdMixedSize(b1, s1));
+    expectEquals(-8388351, testDotProdMixedSizeAndSign(b1, c2));
+    expectEquals(-81279, testDotProdInt32(i1, i2));
+    expectEquals(3, testDotProdBothSignedUnsigned1(b1, b2));
+    expectEquals(54403, testDotProdBothSignedUnsigned2(b1, b2));
+    expectEquals(3, testDotProdBothSignedUnsignedDoubleLoad(b1, b2));
+    expectEquals(-262137, testDotProdBothSignedUnsignedChar(c1, c2));
+  }
+
+  public static void main(String[] args) {
+    run();
+  }
+}
diff --git a/test/684-select-condition/expected.txt b/test/684-select-condition/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/684-select-condition/expected.txt
diff --git a/test/684-select-condition/info.txt b/test/684-select-condition/info.txt
new file mode 100644
index 0000000..f9d4acd
--- /dev/null
+++ b/test/684-select-condition/info.txt
@@ -0,0 +1 @@
+Regression test for a bug in ARM's code generator for HSelect.
diff --git a/test/684-select-condition/src/Main.java b/test/684-select-condition/src/Main.java
new file mode 100644
index 0000000..196ff1a
--- /dev/null
+++ b/test/684-select-condition/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void main(String args[]) {
+    doFloatingPointTest("1", "1.0");
+    doFloatingPointTest("4", "2.0");
+    checkValue(String.valueOf(doIntegerTest1(4)), "0");
+    checkValue(String.valueOf(doIntegerTest2(4)), "4");
+
+    // Another variant of the floating point test, but less brittle.
+    staticField = 1;
+    checkValue(String.valueOf($noinline$test()), "1.0");
+    staticField = 4;
+    checkValue(String.valueOf($noinline$test()), "2.0");
+  }
+
+  // This code is a reduced version of the original reproducer. The arm
+  // code generator used to generate wrong code for it. Note that this
+  // test is very brittle and a simple change in it could cause the compiler
+  // to not trip.
+  public static void doFloatingPointTest(String s, String expected) {
+    float a = (float)Integer.valueOf(s);
+    a = a < 2.0f ? a : 2.0f;
+    checkValue("" + a, expected);
+  }
+
+  // The compiler used to trip on the two following methods. The test there
+  // is very brittle and requires not running constant folding after
+  // load/store elimination.
+  public static int doIntegerTest1(int param) {
+    Main main = new Main();
+    main.field = 0;
+    return (main.field == 0) ? 0 : param;
+  }
+
+  public static int doIntegerTest2(int param) {
+    Main main = new Main();
+    main.field = 0;
+    return (main.field != 0) ? 0 : param;
+  }
+
+  public static void checkValue(String actual, String expected) {
+    if (!expected.equals(actual)) {
+      throw new Error("Expected " + expected + ", got " + actual);
+    }
+  }
+
+  static void $noinline$nothing() {}
+  static int $noinline$getField() { return staticField; }
+
+  static float $noinline$test() {
+    // The 2.0f shall be materialized for GreaterThanOrEqual at the beginning of the method;
+    // since the following call clobbers caller-saves, it is allocated to s16.
+    // r0(field) = InvokeStaticOrDirect[]
+    int one = $noinline$getField();
+    // s0(a_1) = TypeConversion[r0(one)]
+    float a = (float)one;
+    // s16(a_2) = Select[s0(a_1), C(2.0f), GreaterThanOrEqual[s0(a_1), s16(2.0f)]]
+    a = a < 2.0f ? a : 2.0f;
+    // The following call is added to clobber caller-saves, forcing the output of the Select
+    // to be allocated to s16.
+    $noinline$nothing();
+    return a;
+  }
+
+  int field;
+  static int staticField;
+}
diff --git a/test/685-deoptimizeable/expected.txt b/test/685-deoptimizeable/expected.txt
new file mode 100644
index 0000000..f993efc
--- /dev/null
+++ b/test/685-deoptimizeable/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Finishing
diff --git a/test/685-deoptimizeable/info.txt b/test/685-deoptimizeable/info.txt
new file mode 100644
index 0000000..d0952f9
--- /dev/null
+++ b/test/685-deoptimizeable/info.txt
@@ -0,0 +1 @@
+Test various cases for full/partial-fragment deoptimization.
diff --git a/test/685-deoptimizeable/src/Main.java b/test/685-deoptimizeable/src/Main.java
new file mode 100644
index 0000000..fc7fdea
--- /dev/null
+++ b/test/685-deoptimizeable/src/Main.java
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+
+class DummyObject {
+    public static boolean sHashCodeInvoked = false;
+    private int i;
+
+    public DummyObject(int i) {
+        this.i = i;
+    }
+
+    public boolean equals(Object obj) {
+        return (obj instanceof DummyObject) && (i == ((DummyObject)obj).i);
+    }
+
+    public int hashCode() {
+        sHashCodeInvoked = true;
+        Main.assertIsManaged();
+        Main.deoptimizeAll();
+        Main.assertIsInterpreted();
+        return i % 64;
+    }
+}
+
+public class Main {
+    static boolean sFlag = false;
+
+    public static native void deoptimizeAll();
+    public static native void undeoptimizeAll();
+    public static native void assertIsInterpreted();
+    public static native void assertIsManaged();
+    public static native void assertCallerIsInterpreted();
+    public static native void disableStackFrameAsserts();
+    public static native boolean hasJit();
+    private static native void ensureJitCompiled(Class<?> itf, String method_name);
+
+    public static void execute(Runnable runnable) throws Exception {
+      Thread t = new Thread(runnable);
+      t.start();
+      t.join();
+    }
+
+    public static void ensureAllJitCompiled() {
+        ensureJitCompiled(HashMap.class, "hash");
+        ensureJitCompiled(Main.class, "$noinline$run1");
+        ensureJitCompiled(Main.class, "$noinline$run2");
+        ensureJitCompiled(Main.class, "$noinline$run3A");
+        ensureJitCompiled(Main.class, "$noinline$run3B");
+        ensureJitCompiled(DummyObject.class, "hashCode");
+    }
+
+    public static void main(String[] args) throws Exception {
+        System.loadLibrary(args[0]);
+        // Only test stack frames in compiled mode.
+        if (!hasJit()) {
+          disableStackFrameAsserts();
+        }
+
+        ensureAllJitCompiled();
+
+        final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
+
+        // Single-frame deoptimization that covers partial fragment.
+        execute(new Runnable() {
+            public void run() {
+                ensureJitCompiled(this.getClass(), "runInternal");
+                runInternal();
+            }
+
+            public void runInternal() {
+                int[] arr = new int[3];
+                assertIsManaged();
+                int res = $noinline$run1(arr);
+                assertIsManaged();  // Only single frame is deoptimized.
+                if (res != 79) {
+                    System.out.println("Failure 1!");
+                    System.exit(0);
+                }
+            }
+        });
+
+        // Single-frame deoptimization that covers a full fragment.
+        execute(new Runnable() {
+            public void run() {
+                ensureJitCompiled(this.getClass(), "runInternal");
+                runInternal();
+            }
+
+            public void runInternal() {
+                try {
+                    int[] arr = new int[3];
+                    assertIsManaged();
+                    // Use reflection to call $noinline$run2 so that it does
+                    // full-fragment deoptimization since that is an upcall.
+                    Class<?> cls = Class.forName("Main");
+                    Method method = cls.getDeclaredMethod("$noinline$run2", int[].class);
+                    double res = (double)method.invoke(Main.class, arr);
+                    assertIsManaged();  // Only single frame is deoptimized.
+                    if (res != 79.3d) {
+                        System.out.println("Failure 2!");
+                        System.exit(0);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace(System.out);
+                }
+            }
+        });
+
+        // Full-fragment deoptimization.
+        execute(new Runnable() {
+            public void run() {
+                ensureJitCompiled(this.getClass(), "runInternal");
+                runInternal();
+            }
+
+            public void runInternal() {
+                assertIsManaged();
+                float res = $noinline$run3B();
+                assertIsInterpreted();  // Every deoptimizeable method is deoptimized.
+                if (res != 0.034f) {
+                    System.out.println("Failure 3!");
+                    System.exit(0);
+                }
+            }
+        });
+
+        undeoptimizeAll();  // Make compiled code usable again.
+        ensureAllJitCompiled();
+
+        // Partial-fragment deoptimization.
+        execute(new Runnable() {
+            public void run() {
+                ensureJitCompiled(this.getClass(), "runInternal");
+                ensureJitCompiled(HashMap.class, "hash");
+                runInternal();
+            }
+
+            public void runInternal() {
+                try {
+                    assertIsManaged();
+                    map.put(new DummyObject(10), Long.valueOf(100));
+                    assertIsInterpreted();  // Every deoptimizeable method is deoptimized.
+                } catch (Exception e) {
+                    e.printStackTrace(System.out);
+                }
+            }
+        });
+
+        undeoptimizeAll();  // Make compiled code usable again.
+        ensureAllJitCompiled();
+
+        if (!DummyObject.sHashCodeInvoked) {
+            System.out.println("hashCode() method not invoked!");
+        }
+        if (map.get(new DummyObject(10)) != 100) {
+            System.out.println("Wrong hashmap value!");
+        }
+        System.out.println("Finishing");
+    }
+
+    public static int $noinline$run1(int[] arr) {
+        assertIsManaged();
+        // Prevent inlining.
+        if (sFlag) {
+            throw new Error();
+        }
+        boolean caught = false;
+        // BCE will use deoptimization for the code below.
+        try {
+            arr[0] = 1;
+            arr[1] = 1;
+            arr[2] = 1;
+            // This causes AIOOBE and triggers deoptimization from compiled code.
+            arr[3] = 1;
+        } catch (ArrayIndexOutOfBoundsException e) {
+            assertIsInterpreted(); // Single-frame deoptimization triggered.
+            caught = true;
+        }
+        if (!caught) {
+            System.out.println("Expected exception");
+        }
+        assertIsInterpreted();
+        return 79;
+    }
+
+    public static double $noinline$run2(int[] arr) {
+        assertIsManaged();
+        // Prevent inlining.
+        if (sFlag) {
+            throw new Error();
+        }
+        boolean caught = false;
+        // BCE will use deoptimization for the code below.
+        try {
+            arr[0] = 1;
+            arr[1] = 1;
+            arr[2] = 1;
+            // This causes AIOOBE and triggers deoptimization from compiled code.
+            arr[3] = 1;
+        } catch (ArrayIndexOutOfBoundsException e) {
+            assertIsInterpreted();  // Single-frame deoptimization triggered.
+            caught = true;
+        }
+        if (!caught) {
+            System.out.println("Expected exception");
+        }
+        assertIsInterpreted();
+        return 79.3d;
+    }
+
+    public static float $noinline$run3A() {
+        assertIsManaged();
+        // Prevent inlining.
+        if (sFlag) {
+            throw new Error();
+        }
+        // Deoptimize callers.
+        deoptimizeAll();
+        assertIsInterpreted();
+        assertCallerIsInterpreted();  // $noinline$run3B is deoptimizeable.
+        return 0.034f;
+    }
+
+    public static float $noinline$run3B() {
+        assertIsManaged();
+        // Prevent inlining.
+        if (sFlag) {
+            throw new Error();
+        }
+        float res = $noinline$run3A();
+        assertIsInterpreted();
+        return res;
+    }
+}
diff --git a/test/685-shifts/expected.txt b/test/685-shifts/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/685-shifts/expected.txt
diff --git a/test/685-shifts/info.txt b/test/685-shifts/info.txt
new file mode 100644
index 0000000..9cf3e6d
--- /dev/null
+++ b/test/685-shifts/info.txt
@@ -0,0 +1 @@
+Tests for the compiler when shift instructions have 0 or 1 as constant shifts.
diff --git a/test/685-shifts/smali/Test.smali b/test/685-shifts/smali/Test.smali
new file mode 100644
index 0000000..f8dfd6f
--- /dev/null
+++ b/test/685-shifts/smali/Test.smali
@@ -0,0 +1,58 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+.super Ljava/lang/Object;
+
+.method public static shlZero(J)J
+    .registers 6
+    const v2, 0x0
+    shl-long v0, p0, v2
+    return-wide v0
+.end method
+
+.method public static shrZero(J)J
+    .registers 6
+    const v2, 0x0
+    shr-long v0, p0, v2
+    return-wide v0
+.end method
+
+.method public static ushrZero(J)J
+    .registers 6
+    const v2, 0x0
+    ushr-long v0, p0, v2
+    return-wide v0
+.end method
+
+.method public static shlOne(J)J
+    .registers 6
+    const v2, 0x1
+    shl-long v0, p0, v2
+    return-wide v0
+.end method
+
+.method public static shrOne(J)J
+    .registers 6
+    const v2, 0x1
+    shr-long v0, p0, v2
+    return-wide v0
+.end method
+
+.method public static ushrOne(J)J
+    .registers 6
+    const v2, 0x1
+    ushr-long v0, p0, v2
+    return-wide v0
+.end method
diff --git a/test/685-shifts/src/Main.java b/test/685-shifts/src/Main.java
new file mode 100644
index 0000000..d186363
--- /dev/null
+++ b/test/685-shifts/src/Main.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+  static long smallLong = 42L;
+  static long smallLongShlOne = 84L;
+  static long smallLongShrOne = 21L;
+  static long smallLongUShrOne = 21L;
+  static long longLong = 123456789123456789L;
+  static long longLongShlOne = 246913578246913578L;
+  static long longLongShrOne = 61728394561728394L;
+  static long longLongUShrOne = 61728394561728394L;
+
+  static long negativeSmallLong = -42L;
+  static long negativeSmallLongShlOne = -84L;
+  static long negativeSmallLongShrOne = -21L;
+  static long negativeSmallLongUShrOne = 9223372036854775787L;
+  static long negativeLongLong = -123456789123456789L;
+  static long negativeLongLongShlOne = -246913578246913578L;
+  static long negativeLongLongShrOne = -61728394561728395L;
+  static long negativeLongLongUShrOne = 9161643642293047413L;
+
+  private static void assertEquals(long expected, long actual) {
+    if (expected != actual) {
+      throw new Error("Expected " + expected + ", got " + actual);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    Class<?> c = Class.forName("Test");
+    Method m = c.getMethod("shlZero", long.class);
+    assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+    assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+    m = c.getMethod("shrZero", long.class);
+    assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+    assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+    m = c.getMethod("ushrZero", long.class);
+    assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+    assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+    m = c.getMethod("shlOne", long.class);
+    assertEquals(smallLongShlOne, (Long)m.invoke(null, smallLong));
+    assertEquals(longLongShlOne, (Long)m.invoke(null, longLong));
+
+    m = c.getMethod("shrOne", long.class);
+    assertEquals(smallLongShrOne, (Long)m.invoke(null, smallLong));
+    assertEquals(longLongShrOne, (Long)m.invoke(null, longLong));
+
+    m = c.getMethod("ushrOne", long.class);
+    assertEquals(smallLongUShrOne, (Long)m.invoke(null, smallLong));
+    assertEquals(longLongUShrOne, (Long)m.invoke(null, longLong));
+
+    // Test with negative numbers.
+
+    m = c.getMethod("shlZero", long.class);
+    assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+    m = c.getMethod("shrZero", long.class);
+    assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+    m = c.getMethod("ushrZero", long.class);
+    assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+    m = c.getMethod("shlOne", long.class);
+    assertEquals(negativeSmallLongShlOne, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLongShlOne, (Long)m.invoke(null, negativeLongLong));
+
+    m = c.getMethod("shrOne", long.class);
+    assertEquals(negativeSmallLongShrOne, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLongShrOne, (Long)m.invoke(null, negativeLongLong));
+
+    m = c.getMethod("ushrOne", long.class);
+    assertEquals(negativeSmallLongUShrOne, (Long)m.invoke(null, negativeSmallLong));
+    assertEquals(negativeLongLongUShrOne, (Long)m.invoke(null, negativeLongLong));
+  }
+}
diff --git a/test/686-get-this/expected.txt b/test/686-get-this/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/686-get-this/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/686-get-this/info.txt b/test/686-get-this/info.txt
new file mode 100644
index 0000000..7227bad
--- /dev/null
+++ b/test/686-get-this/info.txt
@@ -0,0 +1,2 @@
+Test that we can successfully call StackVisitor.GetThis() even when
+'this' gets overwritten.
diff --git a/test/686-get-this/smali/Test.smali b/test/686-get-this/smali/Test.smali
new file mode 100644
index 0000000..533f607
--- /dev/null
+++ b/test/686-get-this/smali/Test.smali
@@ -0,0 +1,45 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+    .registers 2
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    const/4 v0, 0x1
+    sput v0, LTest;->field:I
+    return-void
+.end method
+
+
+.method public testEmpty()V
+  .registers 2
+  const/4 p0, 0x1
+  invoke-static {}, LMain;->getThisOfCaller()Ljava/lang/Object;
+  move-result-object v0
+  sput-object v0, LMain;->field:Ljava/lang/Object;
+  return-void
+.end method
+
+.method public testPrimitive()I
+  .registers 2
+  sget p0, LTest;->field:I
+  invoke-static {}, LMain;->getThisOfCaller()Ljava/lang/Object;
+  move-result-object v0
+  sput-object v0, LMain;->field:Ljava/lang/Object;
+  return p0
+.end method
+
+.field static public field:I
diff --git a/test/686-get-this/src/Main.java b/test/686-get-this/src/Main.java
new file mode 100644
index 0000000..4ea5301
--- /dev/null
+++ b/test/686-get-this/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+
+    Class<?> c = Class.forName("Test");
+    ensureJitCompiled(c, "testEmpty");
+    ensureJitCompiled(c, "testPrimitive");
+
+    Method m = c.getMethod("testEmpty");
+    m.invoke(c.newInstance());
+    if (field != null) {
+      throw new Error("Expected null");
+    }
+
+    m = c.getMethod("testPrimitive");
+    int a = (Integer)m.invoke(c.newInstance());
+    if (a != 1) {
+      throw new Error("Expected 1, got " + a);
+    }
+    if (field != null) {
+      throw new Error("Expected null");
+    }
+  }
+
+  public static Object field;
+
+  private static native void ensureJitCompiled(Class<?> itf, String method_name);
+  public static native Object getThisOfCaller();
+}
diff --git a/test/687-deopt/expected.txt b/test/687-deopt/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/687-deopt/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/687-deopt/info.txt b/test/687-deopt/info.txt
new file mode 100644
index 0000000..ef56f51
--- /dev/null
+++ b/test/687-deopt/info.txt
@@ -0,0 +1,2 @@
+Regression test for instrumentation deopt, which previously did not expect a
+quickened instruction when returning from instrumentation stub.
diff --git a/test/687-deopt/src/Main.java b/test/687-deopt/src/Main.java
new file mode 100644
index 0000000..afe90d6
--- /dev/null
+++ b/test/687-deopt/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+
+public class Main {
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+
+    // Jit compile HashMap.hash method, so that instrumentation stubs
+    // will deoptimize it.
+    ensureJitCompiled(HashMap.class, "hash");
+
+    Main key = new Main();
+    Integer value = new Integer(10);
+    HashMap<Main, Integer> map = new HashMap<>();
+    map.put(key, value);
+    Integer res = map.get(key);
+    if (!value.equals(res)) {
+      throw new Error("Expected 10, got " + res);
+    }
+  }
+
+  public int hashCode() {
+    // The call stack at this point is:
+    // Main.main
+    //  HashMap.put
+    //    HashMap.hash
+    //      Main.hashCode
+    //
+    // The opcode at HashMap.hash is invoke-virtual-quick which the
+    // instrumentation code did not expect and used to fetch the wrong
+    // method index for it.
+    deoptimizeAll();
+    return 42;
+  }
+
+  public static native void deoptimizeAll();
+  public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/688-shared-library/check b/test/688-shared-library/check
new file mode 100644
index 0000000..0b6c9e4
--- /dev/null
+++ b/test/688-shared-library/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Finalizers of DexFile will complain not being able to close
+# the main dex file, as it's still open. That's OK to ignore.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/688-shared-library/expected.txt b/test/688-shared-library/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/688-shared-library/expected.txt
diff --git a/test/688-shared-library/info.txt b/test/688-shared-library/info.txt
new file mode 100644
index 0000000..2eda65d
--- /dev/null
+++ b/test/688-shared-library/info.txt
@@ -0,0 +1,2 @@
+Tests on BaseDexClassLoader shared libraries and their class
+loading behavior.
diff --git a/test/688-shared-library/run b/test/688-shared-library/run
new file mode 100644
index 0000000..fa6ab58
--- /dev/null
+++ b/test/688-shared-library/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# App images are incompatible with what the test is doing: loading one
+# dex file multiple times.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/688-shared-library/src-art/Main.java b/test/688-shared-library/src-art/Main.java
new file mode 100644
index 0000000..d59e7dc
--- /dev/null
+++ b/test/688-shared-library/src-art/Main.java
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.DelegateLastClassLoader;
+import dalvik.system.PathClassLoader;
+
+public class Main {
+    static final String TEST_NAME = "688-shared-library";
+    static final String MAIN_JAR_FILE = System.getenv("DEX_LOCATION") + "/" + TEST_NAME + ".jar";
+    static final String EX_JAR_FILE = System.getenv("DEX_LOCATION") + "/" + TEST_NAME + "-ex.jar";
+    static ClassLoader bootLoader = Object.class.getClassLoader();
+
+    public static void main(String[] args) throws Exception {
+      testNoLibrary();
+      testOneLibrary();
+      testTwoLibraries1();
+      testTwoLibraries2();
+      testTransitive1();
+      testTransitive2();
+      testTransitive3();
+      testTransitive4();
+    }
+
+    public static void assertIdentical(Object expected, Object actual) {
+      if (expected != actual) {
+        throw new Error("Expected " + expected + ", got " + actual);
+      }
+    }
+
+    public static void testNoLibrary() throws Exception {
+      ClassLoader loader = new PathClassLoader(MAIN_JAR_FILE, null, bootLoader);
+      Class<?> cls = loader.loadClass("Main");
+      assertIdentical(loader, cls.getClassLoader());
+    }
+
+    public static void testOneLibrary() throws Exception {
+      ClassLoader[] sharedLibraries = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader delegateFirst =
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader, sharedLibraries);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader(MAIN_JAR_FILE, null, bootLoader, sharedLibraries);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+    }
+    
+    public static void testTwoLibraries1() throws Exception {
+      ClassLoader[] sharedLibraries = {
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader),
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraries);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[1], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader(MAIN_JAR_FILE, null, bootLoader, sharedLibraries);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[1], cls.getClassLoader());
+    }
+    
+    public static void testTwoLibraries2() throws Exception {
+      ClassLoader[] sharedLibraries = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader),
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraries);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast = new DelegateLastClassLoader("", null, bootLoader, sharedLibraries);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraries[0], cls.getClassLoader());
+    }
+
+    public static void testTransitive1() throws Exception {
+      ClassLoader[] sharedLibraryLevel2 = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader[] sharedLibraryLevel1 = {
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader, sharedLibraryLevel2),
+      };
+
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+    }
+    
+    public static void testTransitive2() throws Exception {
+      ClassLoader[] sharedLibraryLevel2 = {
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader[] sharedLibraryLevel1 = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader, sharedLibraryLevel2),
+      };
+
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel1[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel1[0], cls.getClassLoader());
+    }
+
+    public static void testTransitive3() throws Exception {
+      ClassLoader[] sharedLibraryLevel2 = {
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader[] sharedLibraryLevel1 = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader, sharedLibraryLevel2),
+          sharedLibraryLevel2[0],
+      };
+
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel1[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel1[0], cls.getClassLoader());
+    }
+    
+    public static void testTransitive4() throws Exception {
+      ClassLoader[] sharedLibraryLevel2 = {
+          new PathClassLoader(EX_JAR_FILE, null, bootLoader),
+      };
+      ClassLoader[] sharedLibraryLevel1 = {
+          new PathClassLoader(MAIN_JAR_FILE, null, bootLoader, sharedLibraryLevel2),
+          sharedLibraryLevel2[0],
+      };
+
+      ClassLoader delegateFirst = new PathClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      Class<?> cls = delegateFirst.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateFirst.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      
+      ClassLoader delegateLast =
+          new DelegateLastClassLoader("", null, bootLoader, sharedLibraryLevel1);
+      cls = delegateLast.loadClass("Main");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+      cls = delegateLast.loadClass("SharedLibraryOne");
+      assertIdentical(sharedLibraryLevel2[0], cls.getClassLoader());
+    }
+}
diff --git a/test/688-shared-library/src-ex/Main.java b/test/688-shared-library/src-ex/Main.java
new file mode 100644
index 0000000..f6555b9
--- /dev/null
+++ b/test/688-shared-library/src-ex/Main.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+}
diff --git a/test/688-shared-library/src-ex/SharedLibraryOne.java b/test/688-shared-library/src-ex/SharedLibraryOne.java
new file mode 100644
index 0000000..d86755f
--- /dev/null
+++ b/test/688-shared-library/src-ex/SharedLibraryOne.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class SharedLibraryOne {
+}
diff --git a/test/689-multi-catch/expected.txt b/test/689-multi-catch/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/689-multi-catch/expected.txt
diff --git a/test/689-multi-catch/info.txt b/test/689-multi-catch/info.txt
new file mode 100644
index 0000000..0778ea8
--- /dev/null
+++ b/test/689-multi-catch/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler which used
+to wrongly type an exception in a multi-catch handler.
diff --git a/test/689-multi-catch/src/Main.java b/test/689-multi-catch/src/Main.java
new file mode 100644
index 0000000..18a17f9
--- /dev/null
+++ b/test/689-multi-catch/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    try {
+      willThrow();
+    } catch (IllegalStateException | NullPointerException e) {
+      if (e instanceof NullPointerException) {
+        return;
+      }
+      throw new Error("Expected NullPointerException");
+    }
+  }
+
+  public static void willThrow() {
+    throw new NullPointerException();
+  }
+}
diff --git a/test/689-zygote-jit-deopt/expected.txt b/test/689-zygote-jit-deopt/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/689-zygote-jit-deopt/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/689-zygote-jit-deopt/info.txt b/test/689-zygote-jit-deopt/info.txt
new file mode 100644
index 0000000..100032e
--- /dev/null
+++ b/test/689-zygote-jit-deopt/info.txt
@@ -0,0 +1,2 @@
+Regression test for debuggable apps that need to deoptimize
+methods JIT-compiled by the zygote.
diff --git a/test/689-zygote-jit-deopt/src/Main.java b/test/689-zygote-jit-deopt/src/Main.java
new file mode 100644
index 0000000..330663e
--- /dev/null
+++ b/test/689-zygote-jit-deopt/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    if (!hasJit()) {
+      return;
+    }
+    ensureJitCompiled(Object.class, "toString");
+    transitionJitFromZygote();
+    deoptimizeBootImage();
+    if (hasJitCompiledEntrypoint(Object.class, "toString")) {
+      throw new Error("Expected Object.toString to be deoptimized");
+    }
+  }
+
+  private static native boolean hasJit();
+  private static native void ensureJitCompiled(Class<?> cls, String name);
+  private static native boolean hasJitCompiledEntrypoint(Class<?> cls, String name);
+  private static native void deoptimizeBootImage();
+  private static native void transitionJitFromZygote();
+}
diff --git a/test/690-hiddenapi-same-name-methods/build b/test/690-hiddenapi-same-name-methods/build
new file mode 100644
index 0000000..c364b3b
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+USE_HIDDENAPI=true ./default-build "$@"
diff --git a/test/690-hiddenapi-same-name-methods/expected.txt b/test/690-hiddenapi-same-name-methods/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/690-hiddenapi-same-name-methods/hiddenapi-flags.csv b/test/690-hiddenapi-same-name-methods/hiddenapi-flags.csv
new file mode 100644
index 0000000..001ab80
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/hiddenapi-flags.csv
@@ -0,0 +1,9 @@
+LSpecificClass;->foo()Ljava/lang/Double;,blacklist
+LDirectMethods;->foo()Ljava/lang/Integer;,blacklist
+LDirectMethods;->foo()Ljava/lang/Boolean;,blacklist
+LVirtualMethods;->foo()Ljava/lang/Integer;,blacklist
+LVirtualMethods;->foo()Ljava/lang/Boolean;,blacklist
+LSyntheticMethods;->foo()Ljava/lang/Integer;,blacklist
+LSyntheticMethods;->foo()Ljava/lang/Boolean;,blacklist
+LNonSyntheticMethods;->foo()Ljava/lang/Integer;,blacklist
+LNonSyntheticMethods;->foo()Ljava/lang/Boolean;,blacklist
\ No newline at end of file
diff --git a/test/690-hiddenapi-same-name-methods/info.txt b/test/690-hiddenapi-same-name-methods/info.txt
new file mode 100644
index 0000000..be5b195
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/info.txt
@@ -0,0 +1 @@
+Test that Class::GetDeclaredMethodInternal() takes hidden API into account.
\ No newline at end of file
diff --git a/test/690-hiddenapi-same-name-methods/smali-ex/DirectMethods.smali b/test/690-hiddenapi-same-name-methods/smali-ex/DirectMethods.smali
new file mode 100644
index 0000000..8564976
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/smali-ex/DirectMethods.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+.class LDirectMethods;
+.super Ljava/lang/Object;
+
+# Expect to choose the non-hidden, non-synthetic method.
+
+# Non-hidden methods
+.method private foo()Ljava/lang/Number;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private synthetic foo()Ljava/lang/Double;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+# Hidden methods
+.method private foo()Ljava/lang/Integer;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private synthetic foo()Ljava/lang/Boolean;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
diff --git a/test/690-hiddenapi-same-name-methods/smali-ex/NonSyntheticMethods.smali b/test/690-hiddenapi-same-name-methods/smali-ex/NonSyntheticMethods.smali
new file mode 100644
index 0000000..f47219f
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/smali-ex/NonSyntheticMethods.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+.class LNonSyntheticMethods;
+.super Ljava/lang/Object;
+
+# Expect to choose the non-hidden, virtual method.
+
+# Non-hidden methods
+.method public foo()Ljava/lang/Number;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private foo()Ljava/lang/Double;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+# Hidden methods
+.method public foo()Ljava/lang/Integer;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private foo()Ljava/lang/Boolean;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
diff --git a/test/690-hiddenapi-same-name-methods/smali-ex/SyntheticMethods.smali b/test/690-hiddenapi-same-name-methods/smali-ex/SyntheticMethods.smali
new file mode 100644
index 0000000..afb4d33
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/smali-ex/SyntheticMethods.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+.class LSyntheticMethods;
+.super Ljava/lang/Object;
+
+# Expect to choose the non-hidden, virtual method.
+
+# Non-hidden methods
+.method public synthetic foo()Ljava/lang/Number;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private synthetic foo()Ljava/lang/Double;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+# Hidden methods
+.method public synthetic foo()Ljava/lang/Integer;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method private synthetic foo()Ljava/lang/Boolean;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
diff --git a/test/690-hiddenapi-same-name-methods/smali-ex/VirtualMethods.smali b/test/690-hiddenapi-same-name-methods/smali-ex/VirtualMethods.smali
new file mode 100644
index 0000000..fb26c74
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/smali-ex/VirtualMethods.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+.class LVirtualMethods;
+.super Ljava/lang/Object;
+
+# Expect to choose the non-hidden, non-synthetic method.
+
+# Non-hidden methods
+.method public foo()Ljava/lang/Number;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method public synthetic foo()Ljava/lang/Double;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+# Hidden methods
+.method public foo()Ljava/lang/Integer;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
+
+.method public synthetic foo()Ljava/lang/Boolean;
+    .registers 1
+    const/4 v0, 0x0
+    return-object v0
+.end method
diff --git a/test/690-hiddenapi-same-name-methods/src-ex/GenericInterface.java b/test/690-hiddenapi-same-name-methods/src-ex/GenericInterface.java
new file mode 100644
index 0000000..c404402
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/src-ex/GenericInterface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface GenericInterface<T extends Number> {
+  public T foo();
+}
diff --git a/test/690-hiddenapi-same-name-methods/src-ex/SpecificClass.java b/test/690-hiddenapi-same-name-methods/src-ex/SpecificClass.java
new file mode 100644
index 0000000..dd3a835
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/src-ex/SpecificClass.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class SpecificClass implements GenericInterface<Double> {
+  public Double foo() {
+    return 42.0;
+  }
+}
diff --git a/test/690-hiddenapi-same-name-methods/src/Main.java b/test/690-hiddenapi-same-name-methods/src/Main.java
new file mode 100644
index 0000000..12cfdd7
--- /dev/null
+++ b/test/690-hiddenapi-same-name-methods/src/Main.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Method;
+import java.util.Base64;
+
+public class Main {
+  public static void main(String[] args) throws ClassNotFoundException, NoSuchMethodException {
+    System.loadLibrary(args[0]);
+
+    // Run the initialization routine. This will enable hidden API checks in
+    // the runtime, in case they are not enabled by default.
+    init();
+
+    // Load the '-ex' APK and attach it to the boot class path.
+    appendToBootClassLoader(DEX_EXTRA, /* isCorePlatform */ false);
+
+    // All test classes contain just methods named "foo" with different return types
+    // and access flags. Check that:
+    // (a) only the non-hidden ones are returned from getDeclaredMethods
+    //     (they have return types Number and Double), and
+    // (b) getDeclaredMethod picks virtual/non-synthetic methods over direct/synthetic
+    //     (the right one always has return type Number).
+    Class<?> covariantClass = Class.forName(JAVA_CLASS_NAME, true, BOOT_CLASS_LOADER);
+    checkMethodList(covariantClass, /* expectedLength= */ 1);
+    checkMethod(covariantClass);
+
+    String[] classes = new String[] {
+      "VirtualMethods",
+      "DirectMethods",
+      "SyntheticMethods",
+      "NonSyntheticMethods"
+    };
+    for (String className : classes) {
+      Class<?> klass = Class.forName(className, true, BOOT_CLASS_LOADER);
+      checkMethodList(klass, /* expectedLength= */ 2);
+      checkMethod(klass);
+    }
+  }
+
+  private static void checkMethodList(Class<?> klass, int expectedLength) {
+    String className = klass.getName();
+    Method[] methods = klass.getDeclaredMethods();
+    if (methods.length != expectedLength) {
+      throw new RuntimeException(className + ": expected " + expectedLength +
+          " declared method(s), got " + methods.length);
+    }
+    boolean hasNumberReturnType = false;
+    boolean hasDoubleReturnType = false;
+    for (Method method : methods) {
+      if (!METHOD_NAME.equals(method.getName())) {
+        throw new RuntimeException(className + ": expected declared method name: \"" + METHOD_NAME +
+            "\", got: \"" + method.getName() + "\"");
+      }
+      if (Number.class == method.getReturnType()) {
+        hasNumberReturnType = true;
+      } else if (Double.class == method.getReturnType()) {
+        hasDoubleReturnType = true;
+      }
+    }
+    if (methods.length >= 1 && !hasNumberReturnType) {
+      throw new RuntimeException(className + ": expected a method with return type \"Number\"");
+    }
+    if (methods.length >= 2 && !hasDoubleReturnType) {
+      throw new RuntimeException(className + ": expected a method with return type \"Double\"");
+    }
+  }
+
+  private static void checkMethod(Class<?> klass) throws NoSuchMethodException {
+    String className = klass.getName();
+    Method method = klass.getDeclaredMethod(METHOD_NAME);
+    if (!METHOD_NAME.equals(method.getName())) {
+      throw new RuntimeException(className + ": expected declared method name: \"" + METHOD_NAME +
+          "\", got: \"" + method.getName() + "\"");
+    } else if (Number.class != method.getReturnType()) {
+      throw new RuntimeException(className + ": expected method return type: \"Number\", got \"" +
+          method.getReturnType().toString() + "\"");
+    }
+  }
+
+  private static final String DEX_EXTRA = new File(System.getenv("DEX_LOCATION"),
+      "690-hiddenapi-same-name-methods-ex.jar").getAbsolutePath();
+
+  private static ClassLoader BOOT_CLASS_LOADER = Object.class.getClassLoader();
+
+  private static final String JAVA_CLASS_NAME = "SpecificClass";
+  private static final String METHOD_NAME = "foo";
+
+  // Native functions. Note that these are implemented in 674-hiddenapi/hiddenapi.cc.
+  private static native void appendToBootClassLoader(String dexPath, boolean isCorePlatform);
+  private static native void init();
+}
diff --git a/test/709-checker-varhandles/src/Main.java b/test/709-checker-varhandles/src/Main.java
index 46aaa38..d0ea834 100644
--- a/test/709-checker-varhandles/src/Main.java
+++ b/test/709-checker-varhandles/src/Main.java
@@ -28,7 +28,7 @@
   // Fences (native).
   //
 
-  /// CHECK-START: void Main.fullFence() intrinsics_recognition (after)
+  /// CHECK-START: void Main.fullFence() builder (after)
   /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleFullFence
   //
   /// CHECK-START: void Main.fullFence() instruction_simplifier (after)
@@ -40,7 +40,7 @@
       VarHandle.fullFence();
   }
 
-  /// CHECK-START: void Main.acquireFence() intrinsics_recognition (after)
+  /// CHECK-START: void Main.acquireFence() builder (after)
   /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleAcquireFence
   //
   /// CHECK-START: void Main.acquireFence() instruction_simplifier (after)
@@ -52,7 +52,7 @@
       VarHandle.acquireFence();
   }
 
-  /// CHECK-START: void Main.releaseFence() intrinsics_recognition (after)
+  /// CHECK-START: void Main.releaseFence() builder (after)
   /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleReleaseFence
   //
   /// CHECK-START: void Main.releaseFence() instruction_simplifier (after)
@@ -64,7 +64,7 @@
       VarHandle.releaseFence();
   }
 
-  /// CHECK-START: void Main.loadLoadFence() intrinsics_recognition (after)
+  /// CHECK-START: void Main.loadLoadFence() builder (after)
   /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleLoadLoadFence
   //
   /// CHECK-START: void Main.loadLoadFence() instruction_simplifier (after)
@@ -76,7 +76,7 @@
       VarHandle.loadLoadFence();
   }
 
-  /// CHECK-START: void Main.storeStoreFence() intrinsics_recognition (after)
+  /// CHECK-START: void Main.storeStoreFence() builder (after)
   /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleStoreStoreFence
   //
   /// CHECK-START: void Main.storeStoreFence() instruction_simplifier (after)
diff --git a/test/719-dm-verify-redefinition/check b/test/719-dm-verify-redefinition/check
new file mode 100644
index 0000000..b5003bd
--- /dev/null
+++ b/test/719-dm-verify-redefinition/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Search for the redefinition line and remove unnecessary tags.
+sed -e 's/^dex2oat[d]\?\(\|32\|64\)\ W.*\] Found redefinition of boot classes\. Not doing fast verification\./Found redefinition of boot classes\. Not doing fast verification\./g' "$2" > "$2.tmp1"
+# Remove all other dex2oat/dalvikvm log lines.
+grep -v dex2oat "$2.tmp1" | grep -v dalvikvm >> "$2.tmp2"
+
+./default-check "$1" "$2.tmp2"
diff --git a/test/719-dm-verify-redefinition/expected.txt b/test/719-dm-verify-redefinition/expected.txt
new file mode 100644
index 0000000..64fb4ea
--- /dev/null
+++ b/test/719-dm-verify-redefinition/expected.txt
@@ -0,0 +1,3 @@
+Found redefinition of boot classes. Not doing fast verification.
+Hello, world!
+Correct resolution of boot class.
diff --git a/test/719-dm-verify-redefinition/info.txt b/test/719-dm-verify-redefinition/info.txt
new file mode 100644
index 0000000..1229bdb
--- /dev/null
+++ b/test/719-dm-verify-redefinition/info.txt
@@ -0,0 +1,2 @@
+Verifies that the vdex file from a DexMetadata archive is discarded
+if the app redefines boot classes.
diff --git a/test/719-dm-verify-redefinition/run b/test/719-dm-verify-redefinition/run
new file mode 100644
index 0000000..8e568b5
--- /dev/null
+++ b/test/719-dm-verify-redefinition/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export ANDROID_LOG_TAGS='*:w'
+exec ${RUN} --external-log-tags --dm "${@}"
diff --git a/test/719-dm-verify-redefinition/src/Main.java b/test/719-dm-verify-redefinition/src/Main.java
new file mode 100644
index 0000000..37575b6
--- /dev/null
+++ b/test/719-dm-verify-redefinition/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.BitSet;
+
+public class Main {
+  public static void main(String[] args) {
+    System.out.println("Hello, world!");
+    if (BitSet.class.getClassLoader().equals(String.class.getClassLoader())) {
+      System.out.println("Correct resolution of boot class.");
+    } else {
+      System.out.println("Bogus resolution of boot class.");
+    }
+  }
+}
diff --git a/test/719-dm-verify-redefinition/src/java/util/BitSet.java b/test/719-dm-verify-redefinition/src/java/util/BitSet.java
new file mode 100644
index 0000000..5d91fd8
--- /dev/null
+++ b/test/719-dm-verify-redefinition/src/java/util/BitSet.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package java.util;
+
+public class BitSet {
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index f3c3f03..291de72 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -73,4 +73,5 @@
 b/30458218
 b/31313170
 ConstClassAliasing
+b/121191566
 Done!
diff --git a/test/800-smali/smali/b_121191566.smali b/test/800-smali/smali/b_121191566.smali
new file mode 100644
index 0000000..bcf9ef5
--- /dev/null
+++ b/test/800-smali/smali/b_121191566.smali
@@ -0,0 +1,26 @@
+.class public LB121191566;
+.super Ljava/lang/Object;
+
+
+.method public constructor <init>()V
+.registers 1
+       invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+       return-void
+.end method
+
+.method public static run(Ljava/lang/Object;)Z
+.registers 5
+       move-object v3, v4
+       instance-of v4, v3, Ljava/lang/String;
+       if-eqz v4, :Branch
+       # The peephole must not overwrite v4 (from the move-object). Use an integral move
+       # to check.
+       move v0, v4
+       goto :End
+:Branch
+       # See above.
+       move v0, v4
+:End
+       # Triple-check: the merge should be consistent.
+       return v0
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 9b06e9e..d7979e1 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -193,6 +193,8 @@
         testCases.add(new TestCase("b/31313170", "B31313170", "run", null, null, 0));
         testCases.add(new TestCase("ConstClassAliasing", "ConstClassAliasing", "run", null, null,
                 null, true));
+        testCases.add(new TestCase("b/121191566", "B121191566", "run", new Object[] { "a" }, null,
+                true, false));
     }
 
     public void runTests() {
diff --git a/test/905-object-free/src/art/Test905.java b/test/905-object-free/src/art/Test905.java
index 62b6e62..dddd1aa 100644
--- a/test/905-object-free/src/art/Test905.java
+++ b/test/905-object-free/src/art/Test905.java
@@ -16,10 +16,53 @@
 
 package art;
 
+import java.lang.ref.PhantomReference;
+import java.lang.ref.ReferenceQueue;
 import java.util.ArrayList;
 import java.util.Arrays;
 
 public class Test905 {
+  // Taken from jdwp tests.
+  public static class MarkerObj {
+    public static int cnt = 0;
+    public void finalize() { cnt++; }
+  }
+  public static class GcMarker {
+    private final ReferenceQueue mQueue;
+    private final ArrayList<PhantomReference> mList;
+    public GcMarker() {
+      mQueue = new ReferenceQueue();
+      mList = new ArrayList<PhantomReference>(3);
+    }
+    public void add(Object referent) {
+      mList.add(new PhantomReference(referent, mQueue));
+    }
+    public void waitForGc() {
+      waitForGc(mList.size());
+    }
+    public void waitForGc(int numberOfExpectedFinalizations) {
+      if (numberOfExpectedFinalizations > mList.size()) {
+        throw new IllegalArgumentException("wait condition will never be met");
+      }
+      // Request finalization of objects, and subsequent reference enqueueing.
+      // Repeat until reference queue reaches expected size.
+      do {
+          System.runFinalization();
+          Runtime.getRuntime().gc();
+          try { Thread.sleep(10); } catch (Exception e) {}
+      } while (isLive(numberOfExpectedFinalizations));
+    }
+    private boolean isLive(int numberOfExpectedFinalizations) {
+      int numberFinalized = 0;
+      for (int i = 0, n = mList.size(); i < n; i++) {
+        if (mList.get(i).isEnqueued()) {
+          numberFinalized++;
+        }
+      }
+      return numberFinalized < numberOfExpectedFinalizations;
+    }
+  }
+
   public static void run() throws Exception {
     doTest();
   }
@@ -44,7 +87,7 @@
     allocate(l, 1);
     l.clear();
 
-    Runtime.getRuntime().gc();
+    gcAndWait();
 
     getAndPrintTags();
     System.out.println("---");
@@ -56,12 +99,12 @@
     }
     l.clear();
 
-    Runtime.getRuntime().gc();
+    gcAndWait();
 
     getAndPrintTags();
     System.out.println("---");
 
-    Runtime.getRuntime().gc();
+    gcAndWait();
 
     getAndPrintTags();
     System.out.println("---");
@@ -80,7 +123,7 @@
     for (int i = 1; i <= 100000; ++i) {
       stressAllocate(i);
     }
-    Runtime.getRuntime().gc();
+    gcAndWait();
     long[] freedTags1 = getCollectedTags(0);
     long[] freedTags2 = getCollectedTags(1);
     System.out.println("Free counts " + freedTags1.length + " " + freedTags2.length);
@@ -103,6 +146,17 @@
     System.out.println(Arrays.toString(freedTags));
   }
 
+  private static GcMarker getMarker() {
+    GcMarker m = new GcMarker();
+    m.add(new MarkerObj());
+    return m;
+  }
+
+  private static void gcAndWait() {
+    GcMarker marker = getMarker();
+    marker.waitForGc();
+  }
+
   private static native void setupObjectFreeCallback();
   private static native void enableFreeTracking(boolean enable);
   private static native long[] getCollectedTags(int index);
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index b0a400a..3179424 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -21,7 +21,7 @@
  baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
- doTest ()V 33 25
+ doTest ()V 31 25
  run ()V 0 25
 ---------
  print (Ljava/lang/Thread;II)V 0 38
@@ -41,7 +41,7 @@
  baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
- doTest ()V 37 26
+ doTest ()V 35 26
  run ()V 0 25
 ---------
  getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
@@ -62,7 +62,7 @@
  baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
- doTest ()V 60 32
+ doTest ()V 58 32
  run ()V 0 25
 ---------
  bar (IIILart/ControlData;)J 0 26
@@ -857,7 +857,7 @@
 4
 JVMTI_ERROR_ILLEGAL_ARGUMENT
 [public static native java.lang.Object[] art.Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
-[public static void art.Frames.doTestSameThread(), 40]
+[public static void art.Frames.doTestSameThread(), 3e]
 [public static void art.Frames.doTest() throws java.lang.Exception, 0]
 [public void art.Test911$1.run(), 28]
 JVMTI_ERROR_NO_MORE_FRAMES
diff --git a/test/911-get-stack-trace/src/art/PrintThread.java b/test/911-get-stack-trace/src/art/PrintThread.java
index d8b3cbc..798db06 100644
--- a/test/911-get-stack-trace/src/art/PrintThread.java
+++ b/test/911-get-stack-trace/src/art/PrintThread.java
@@ -42,7 +42,7 @@
   // may not exist depending on the environment.
   public final static String IGNORE_THREAD_NAME_REGEX =
       "Binder:|RenderThread|hwuiTask|Jit thread pool worker|Instr:|JDWP|Profile Saver|main|" +
-      "queued-work-looper|InstrumentationConnectionThread";
+      "queued-work-looper|InstrumentationConnectionThread|intel_svc_streamer_thread";
   public final static Matcher IGNORE_THREADS =
       Pattern.compile(IGNORE_THREAD_NAME_REGEX).matcher("");
 
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 065b854..1bd56d1 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,9 +1,9 @@
 ---
 true true
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780004, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780001, length=-1]
@@ -44,14 +44,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780006, length=-1]
@@ -90,18 +90,18 @@
 5@1002 --(field@9)--> 6@1000 [size=16, length=-1]
 6@1000 --(class)--> 1000@0 [size=123456780005, length=-1]
 ---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 ---
 3@1001 --(class)--> 1001@0 [size=123456780011, length=-1]
 ---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 ---
 3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
 ---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 ---
 1001@0 --(superclass)--> 1000@0 [size=123456780020, length=-1]
 3@1001 --(class)--> 1001@0 [size=123456780021, length=-1]
@@ -110,14 +110,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 ---
 1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
 3@1001 --(class)--> 1001@0 [size=123456780026, length=-1]
@@ -198,10 +198,10 @@
 ---
 ---
 ---- untagged objects
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780054, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780051, length=-1]
@@ -242,14 +242,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780056, length=-1]
@@ -289,9 +289,9 @@
 6@1000 --(class)--> 1000@0 [size=123456780055, length=-1]
 ---
 ---- tagged classes
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780064, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780061, length=-1]
@@ -316,9 +316,9 @@
 5@1002 --(field@8)--> 500@0 [size=20, length=2]
 6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
 ---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780069, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780066, length=-1]
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index b0e0f07..28a737d 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -43,6 +43,10 @@
 
 #define UNREACHABLE  __builtin_unreachable
 
+// The tag value used on the Java side to tag the current thread.
+static constexpr jlong kThreadTag = 3000;
+static constexpr const char* kThreadReferree = "3000@0";
+
 extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
     JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
   jvmtiError ret = jvmti_env->ForceGarbageCollection();
@@ -174,12 +178,12 @@
       }
       // Ignore classes (1000 <= tag < 3000) for thread objects. These can be held by the JIT.
       if (reference_kind == JVMTI_HEAP_REFERENCE_THREAD && class_tag == 0 &&
-              (1000 <= *tag_ptr &&  *tag_ptr < 3000)) {
+              (1000 <= *tag_ptr &&  *tag_ptr < kThreadTag)) {
         return 0;
       }
       // Ignore stack-locals of untagged threads. That is the environment.
       if (reference_kind == JVMTI_HEAP_REFERENCE_STACK_LOCAL &&
-          reference_info->stack_local.thread_tag != 3000) {
+          reference_info->stack_local.thread_tag != kThreadTag) {
         return 0;
       }
       // Ignore array elements with an untagged source. These are from the environment.
@@ -422,7 +426,7 @@
           jint index = reference_info->array.index;
           // Normalize if it's "0@0" -> "3000@1".
           // TODO: A pre-pass could probably give us this index to check explicitly.
-          if (referrer == "0@0" && referree == "3000@0") {
+          if (referrer == "0@0" && referree == kThreadReferree) {
             index = 0;
           }
           std::string tmp = StringPrintf("array-element@%d", index);
@@ -645,7 +649,10 @@
                                            const void* elements,
                                            void* user_data) {
       FindArrayCallbacks* p = reinterpret_cast<FindArrayCallbacks*>(user_data);
-      if (*tag_ptr != 0) {
+      // The thread object may be reachable from the starting value because of setup in the
+      // framework (when this test runs as part of CTS). Ignore, we're not testing the thread
+      // here.
+      if (*tag_ptr != 0 && *tag_ptr != kThreadTag) {
         std::ostringstream oss;
         oss << *tag_ptr
             << '@'
@@ -758,7 +765,10 @@
                                                     jvmtiPrimitiveType value_type,
                                                     void* user_data) {
       FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data);
-      if (*tag_ptr != 0) {
+      // The thread object may be reachable from the starting value because of setup in the
+      // framework (when this test runs as part of CTS). Ignore, we're not testing the thread
+      // here.
+      if (*tag_ptr != 0 && *tag_ptr != kThreadTag) {
         std::ostringstream oss;
         oss << *tag_ptr
             << '@'
diff --git a/test/918-fields/expected.txt b/test/918-fields/expected.txt
index af78615..0114ccc 100644
--- a/test/918-fields/expected.txt
+++ b/test/918-fields/expected.txt
@@ -2,9 +2,9 @@
 class java.lang.Math
 25
 false
-[value, I, null]
-class java.lang.Integer
-18
+[bytesTransferred, I, null]
+class java.io.InterruptedIOException
+1
 false
 [this$0, Lart/Test918;, null]
 class art.Test918$Foo
@@ -18,3 +18,7 @@
 class art.Test918$Generics
 0
 false
+[privateValue, I, null]
+class art.Test918$Generics
+2
+false
diff --git a/test/918-fields/src/art/Test918.java b/test/918-fields/src/art/Test918.java
index ca23c03..5328b0b 100644
--- a/test/918-fields/src/art/Test918.java
+++ b/test/918-fields/src/art/Test918.java
@@ -16,6 +16,7 @@
 
 package art;
 
+import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
 import java.util.Arrays;
 
@@ -26,10 +27,11 @@
 
   public static void doTest() throws Exception {
     testField(Math.class, "PI");
-    testField(Integer.class, "value");
+    testField(InterruptedIOException.class, "bytesTransferred");
     testField(Foo.class, "this$0");
     testField(Bar.class, "VAL");
     testField(Generics.class, "generics");
+    testField(Generics.class, "privateValue");
   }
 
   private static void testField(Class<?> base, String fieldName)
@@ -71,5 +73,6 @@
 
   private static class Generics<T> {
     T generics;
+    private int privateValue = 42;
   }
 }
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
index e8e9781..e97c9c6 100644
--- a/test/924-threads/src/art/Test924.java
+++ b/test/924-threads/src/art/Test924.java
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.time.Instant;
 
 public class Test924 {
   public static void run() throws Exception {
@@ -109,6 +110,7 @@
     final CountDownLatch cdl4 = new CountDownLatch(1);
     final CountDownLatch cdl5 = new CountDownLatch(1);
     final Holder h = new Holder();
+    final long ALMOST_INFINITE = 100000000;  // 1.1 days!
     final NativeWaiter w = new NativeWaiter();
     Runnable r = new Runnable() {
       @Override
@@ -121,7 +123,7 @@
 
           cdl2.countDown();
           synchronized(cdl2) {
-            cdl2.wait(1000);  // Wait a second.
+            cdl2.wait(ALMOST_INFINITE);
           }
 
           cdl3_1.await();
@@ -131,7 +133,9 @@
           }
 
           cdl4.countDown();
-          Thread.sleep(1000);
+          try {
+            Thread.sleep(ALMOST_INFINITE);
+          } catch (InterruptedException e) { }
 
           cdl5.countDown();
           while (!h.flag) {
@@ -152,18 +156,20 @@
 
     // Waiting.
     cdl1.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent so just wait for the desired state for up to 5 minutes then give
+    // up and continue
+    final int WAITING_INDEF = 0x191;
+    waitForState(t, WAITING_INDEF);
     synchronized(cdl1) {
       cdl1.notifyAll();
     }
 
     // Timed waiting.
     cdl2.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent so just wait for the desired state for up to 5 minutes then give
+    // up and continue
+    final int WAITING_TIMED = 0x1a1;
+    waitForState(t, WAITING_TIMED);
     synchronized(cdl2) {
       cdl2.notifyAll();
     }
@@ -185,14 +191,16 @@
 
     // Sleeping.
     cdl4.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent so just wait for the desired state for up to 5 minutes then give
+    // up and continue
+    final int WAITING_SLEEP = 0xe1;
+    waitForState(t, WAITING_SLEEP);
+    t.interrupt();
 
     // Running.
     cdl5.await();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
     printThreadState(t);
     h.flag = true;
 
@@ -204,11 +212,26 @@
     // Dying.
     t.join();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
 
     printThreadState(t);
   }
 
+  private static void waitForState(Thread t, int desired) throws Exception {
+    Thread.yield();
+    Thread.sleep(1000);
+    // This is super inconsistent so just wait for the desired state for up to 5 minutes then give
+    // up and continue
+    int state;
+    Instant deadline = Instant.now().plusSeconds(60 * 5);
+    while ((state = getThreadState(t)) != desired && deadline.isAfter(Instant.now())) {
+      Thread.yield();
+      Thread.sleep(100);
+      Thread.yield();
+    }
+    printThreadState(state);
+  }
+
   private static void doAllThreadsTests() {
     Thread[] threads = getAllThreads();
     List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
diff --git a/test/966-default-conflict/expected.txt b/test/966-default-conflict/expected.txt
index fad2c25..bbd733c 100644
--- a/test/966-default-conflict/expected.txt
+++ b/test/966-default-conflict/expected.txt
@@ -1,3 +1,4 @@
+JNI_OnLoad called
 Create Main instance
 Calling functions on concrete Main
 Calling non-conflicting function on Main
diff --git a/test/966-default-conflict/src/Main.java b/test/966-default-conflict/src/Main.java
index ce8cb47..f466715 100644
--- a/test/966-default-conflict/src/Main.java
+++ b/test/966-default-conflict/src/Main.java
@@ -15,6 +15,13 @@
  */
 class Main implements Iface, Iface2 {
   public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    // Ensure we JIT compile the methods to test CHA behavior with default
+    // methods.
+    ensureJitCompiled(Main.class, "callMain");
+    ensureJitCompiled(Main.class, "callIface");
+    ensureJitCompiled(Main.class, "callIface2");
+
     System.out.println("Create Main instance");
     Main m = new Main();
     System.out.println("Calling functions on concrete Main");
@@ -68,4 +75,6 @@
     }
     return;
   }
+
+  private static native void ensureJitCompiled(Class<?> cls, String method_name);
 }
diff --git a/test/979-const-method-handle/expected.txt b/test/979-const-method-handle/expected.txt
index bbaaedb..8531709 100644
--- a/test/979-const-method-handle/expected.txt
+++ b/test/979-const-method-handle/expected.txt
@@ -7,3 +7,11 @@
 2.718281828459045
 repeatConstMethodHandle()
 Attempting to set Math.E raised IAE
+Quack
+Moo
+Woof
+Test
+Getting field in TestTokenizer raised WMTE (woohoo!)
+Stack: tos was 7
+Stack: capacity was 10
+Stack: capacity is 2
diff --git a/test/979-const-method-handle/src/Main.java b/test/979-const-method-handle/src/Main.java
index 427ca7a..5368a22 100644
--- a/test/979-const-method-handle/src/Main.java
+++ b/test/979-const-method-handle/src/Main.java
@@ -18,6 +18,11 @@
 import annotations.ConstantMethodType;
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+
+import java.io.StreamTokenizer;
+import java.io.StringReader;
+import java.util.Stack;
 
 class Main {
     /**
@@ -45,6 +50,12 @@
         private int field;
     }
 
+    private static class TestTokenizer extends StreamTokenizer {
+        public TestTokenizer(String message) {
+            super(new StringReader(message));
+        }
+    }
+
     @ConstantMethodType(
             returnType = String.class,
             parameterTypes = {int.class, Integer.class, System.class})
@@ -136,6 +147,48 @@
         return null;
     }
 
+    @ConstantMethodHandle(
+        kind = ConstantMethodHandle.INSTANCE_GET,
+        owner = "java/io/StreamTokenizer",
+        fieldOrMethodName = "sval",
+        descriptor = "Ljava/lang/String;")
+     private static MethodHandle getSval() {
+        unreachable();
+        return null;
+    }
+
+    // This constant-method-handle references a private instance field. If
+    // referenced in bytecode it raises IAE at load time.
+    @ConstantMethodHandle(
+        kind = ConstantMethodHandle.INSTANCE_PUT,
+        owner = "java/io/StreamTokenizer",
+        fieldOrMethodName = "peekc",
+        descriptor = "I")
+     private static MethodHandle putPeekc() {
+        unreachable();
+        return null;
+    }
+
+    @ConstantMethodHandle(
+        kind = ConstantMethodHandle.INVOKE_VIRTUAL,
+        owner = "java/util/Stack",
+        fieldOrMethodName = "pop",
+        descriptor = "()Ljava/lang/Object;")
+    private static MethodHandle stackPop() {
+        unreachable();
+        return null;
+    }
+
+    @ConstantMethodHandle(
+        kind = ConstantMethodHandle.INVOKE_VIRTUAL,
+        owner = "java/util/Stack",
+        fieldOrMethodName = "trimToSize",
+        descriptor = "()V")
+    private static MethodHandle stackTrim() {
+        unreachable();
+        return null;
+    }
+
     private static void repeatConstMethodHandle() throws Throwable {
         System.out.println("repeatConstMethodHandle()");
         String[] values = {"A", "B", "C"};
@@ -166,5 +219,29 @@
         } catch (IllegalAccessError expected) {
             System.out.println("Attempting to set Math.E raised IAE");
         }
+
+        StreamTokenizer st = new StreamTokenizer(new StringReader("Quack Moo Woof"));
+        while (st.nextToken() != StreamTokenizer.TT_EOF) {
+            System.out.println((String) getSval().invokeExact(st));
+        }
+
+        TestTokenizer tt = new TestTokenizer("Test message 123");
+        tt.nextToken();
+        System.out.println((String) getSval().invoke(tt));
+        try {
+            System.out.println((String) getSval().invokeExact(tt));
+        } catch (WrongMethodTypeException wmte) {
+            System.out.println("Getting field in TestTokenizer raised WMTE (woohoo!)");
+        }
+
+        Stack stack = new Stack();
+        stack.push(Integer.valueOf(3));
+        stack.push(Integer.valueOf(5));
+        stack.push(Integer.valueOf(7));
+        Object tos = stackPop().invokeExact(stack);
+        System.out.println("Stack: tos was " + tos);
+        System.out.println("Stack: capacity was " + stack.capacity());
+        stackTrim().invokeExact(stack);
+        System.out.println("Stack: capacity is " + stack.capacity());
     }
 }
diff --git a/test/980-redefine-object/redef_object.cc b/test/980-redefine-object/redef_object.cc
index b4d82ad..a8393dc 100644
--- a/test/980-redefine-object/redef_object.cc
+++ b/test/980-redefine-object/redef_object.cc
@@ -80,13 +80,13 @@
    public:
     explicit JvmtiAllocator(jvmtiEnv* jvmti) : jvmti_(jvmti) {}
 
-    void* Allocate(size_t size) {
+    void* Allocate(size_t size) override {
       unsigned char* res = nullptr;
       jvmti_->Allocate(size, &res);
       return res;
     }
 
-    void Free(void* ptr) {
+    void Free(void* ptr) override {
       jvmti_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
     }
 
diff --git a/test/999-redefine-hiddenapi/api-blacklist.txt b/test/999-redefine-hiddenapi/api-blacklist.txt
deleted file mode 100644
index 63e37aa..0000000
--- a/test/999-redefine-hiddenapi/api-blacklist.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Lart/Test999;->foo()V
-Lart/Test999;->bar:I
diff --git a/test/999-redefine-hiddenapi/hiddenapi-flags.csv b/test/999-redefine-hiddenapi/hiddenapi-flags.csv
new file mode 100644
index 0000000..7f632d3
--- /dev/null
+++ b/test/999-redefine-hiddenapi/hiddenapi-flags.csv
@@ -0,0 +1,2 @@
+Lart/Test999;->foo()V,blacklist
+Lart/Test999;->bar:I,blacklist
diff --git a/test/999-redefine-hiddenapi/src-redefine/gen.sh b/test/999-redefine-hiddenapi/src-redefine/gen.sh
index 6948cbb..f92d797 100755
--- a/test/999-redefine-hiddenapi/src-redefine/gen.sh
+++ b/test/999-redefine-hiddenapi/src-redefine/gen.sh
@@ -18,9 +18,11 @@
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 TMP=`mktemp -d`
 
-CLASS "art/Test999"
+CLASS="art/Test999"
 
-(cd "$TMP" && javac -d "${TMP}" "$DIR/${CLASS}.java" && d8 --output . "$TMP/${CLASS}.class")
+(cd "$TMP" && \
+    javac -d "${TMP}" "$DIR/${CLASS}.java" && \
+    d8 --output . "$TMP/${CLASS}.class")
 
 echo '  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode('
 base64 "${TMP}/${CLASS}.class" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
diff --git a/test/999-redefine-hiddenapi/src/Main.java b/test/999-redefine-hiddenapi/src/Main.java
index c6365ac..014ea16 100644
--- a/test/999-redefine-hiddenapi/src/Main.java
+++ b/test/999-redefine-hiddenapi/src/Main.java
@@ -19,7 +19,7 @@
 import java.util.Base64;
 
 public class Main {
-  public static void main(String[] args) throws Exception {
+  public static void main(String[] args) throws ClassNotFoundException {
     System.loadLibrary(args[0]);
 
     // Run the initialization routine. This will enable hidden API checks in
@@ -27,39 +27,37 @@
     init();
 
     // Load the '-ex' APK and attach it to the boot class path.
-    appendToBootClassLoader(DEX_EXTRA);
+    appendToBootClassLoader(DEX_EXTRA, /* isCorePlatform */ false);
 
     // Find the test class in boot class loader and verify that its members are hidden.
     Class<?> klass = Class.forName("art.Test999", true, BOOT_CLASS_LOADER);
-    assertMethodIsHidden(klass, "before redefinition");
     assertFieldIsHidden(klass, "before redefinition");
+    assertMethodIsHidden(klass, "before redefinition");
 
-    // Redefine the class using JVMTI.
+    // Redefine the class using JVMTI. Use dex file without hiddenapi flags.
     art.Redefinition.setTestConfiguration(art.Redefinition.Config.COMMON_REDEFINE);
     art.Redefinition.doCommonClassRedefinition(klass, CLASS_BYTES, DEX_BYTES);
 
     // Verify that the class members are still hidden.
-    assertMethodIsHidden(klass, "after redefinition");
-    assertFieldIsHidden(klass, "after redefinition");
+    assertFieldIsHidden(klass, "after first redefinition");
+    assertMethodIsHidden(klass, "after first redefinition");
   }
 
-  private static void assertMethodIsHidden(Class<?> klass, String msg) throws Exception {
+  private static void assertMethodIsHidden(Class<?> klass, String msg) {
     try {
       klass.getDeclaredMethod("foo");
       // Unexpected. Should have thrown NoSuchMethodException.
-      throw new Exception("Method should not be accessible " + msg);
+      throw new RuntimeException("Method should not be accessible " + msg);
     } catch (NoSuchMethodException ex) {
-      // Expected.
     }
   }
 
-  private static void assertFieldIsHidden(Class<?> klass, String msg) throws Exception {
+  private static void assertFieldIsHidden(Class<?> klass, String msg) {
     try {
       klass.getDeclaredField("bar");
       // Unexpected. Should have thrown NoSuchFieldException.
-      throw new Exception("Field should not be accessible " + msg);
+      throw new RuntimeException("Field should not be accessible " + msg);
     } catch (NoSuchFieldException ex) {
-      // Expected.
     }
   }
 
@@ -69,7 +67,7 @@
   private static ClassLoader BOOT_CLASS_LOADER = Object.class.getClassLoader();
 
   // Native functions. Note that these are implemented in 674-hiddenapi/hiddenapi.cc.
-  private static native void appendToBootClassLoader(String dexPath);
+  private static native void appendToBootClassLoader(String dexPath, boolean isCorePlatform);
   private static native void init();
 
   /**
@@ -93,19 +91,20 @@
     "ASoQQLUAArEAAAABAA0AAAAKAAIAAAATAAQAGAABAA4ACwABAAwAAAAlAAIAAQAAAAmyAAMSBLYA" +
     "BbEAAAABAA0AAAAKAAIAAAAVAAgAFgABAA8AAAACABA=");
   private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
-    "ZGV4CjAzNQD0dZ+IWxOi+cJDSWjfTnUerlZj1Lll3ONIAwAAcAAAAHhWNBIAAAAAAAAAAJwCAAAQ" +
-    "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAACAAAA5AAAAAQAAAD0AAAAAQAAABQBAAAUAgAANAEAAIYB" +
+    "ZGV4CjAzNQDlfmgFfKulToQpDF+P4dsgeOkgfzzH+5lgAwAAcAAAAHhWNBIAAAAAAAAAALQCAAAQ" +
+    "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAACAAAA5AAAAAQAAAD0AAAAAQAAABQBAAAsAgAANAEAAIYB" +
     "AACOAQAAlwEAAJoBAACpAQAAwAEAANQBAADoAQAA/AEAAAoCAAANAgAAEQIAABYCAAAbAgAAIAIA" +
     "ACkCAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAACAAQAA" +
     "AQAAAAsAAAAFAAIADQAAAAEAAAAAAAAAAQAAAAwAAAACAAEADgAAAAMAAAAAAAAAAQAAAAEAAAAD" +
-    "AAAAAAAAAAgAAAAAAAAAhwIAAAAAAAACAAEAAQAAAHQBAAAIAAAAcBADAAEAEwBAAFkQAAAOAAMA" +
+    "AAAAAAAAAAgAAAAAAAAAoAIAAAAAAAACAAEAAQAAAHQBAAAIAAAAcBADAAEAEwBAAFkQAAAOAAMA" +
     "AQACAAAAeQEAAAgAAABiAAEAGgEBAG4gAgAQAA4AEwAOQAAVAA54AAAAAQAAAAQABjxpbml0PgAH" +
     "R29vZGJ5ZQABSQANTGFydC9UZXN0OTk5OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9s" +
     "YW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AAxUZXN0" +
-    "OTk5LmphdmEAAVYAAlZMAANiYXIAA2ZvbwADb3V0AAdwcmludGxuAFx+fkQ4eyJtaW4tYXBpIjox" +
-    "LCJzaGEtMSI6IjU2YzJlMzBmNTIzM2I4NDRmZjZkZGQ4N2ZiNTNkMzRmYjE3MjM3ZGYiLCJ2ZXJz" +
-    "aW9uIjoidjEuMi4xNS1kZXYifQAAAQEBAAEAgYAEtAIBAdQCAAAAAAAOAAAAAAAAAAEAAAAAAAAA" +
-    "AQAAABAAAABwAAAAAgAAAAcAAACwAAAAAwAAAAIAAADMAAAABAAAAAIAAADkAAAABQAAAAQAAAD0" +
-    "AAAABgAAAAEAAAAUAQAAASAAAAIAAAA0AQAAAyAAAAIAAAB0AQAAARAAAAEAAACAAQAAAiAAABAA" +
-    "AACGAQAAACAAAAEAAACHAgAAAxAAAAEAAACYAgAAABAAAAEAAACcAgAA");
+    "OTk5LmphdmEAAVYAAlZMAANiYXIAA2ZvbwADb3V0AAdwcmludGxuAHV+fkQ4eyJjb21waWxhdGlv" +
+    "bi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6ImQyMmFiNGYxOWI3NTYxNDQ3NTI4" +
+    "NTdjYTg2YjJjZWU0ZGQ5Y2ExNjYiLCJ2ZXJzaW9uIjoiMS40LjktZGV2In0AAAEBAQABAIGABLQC" +
+    "AQHUAgAAAAAOAAAAAAAAAAEAAAAAAAAAAQAAABAAAABwAAAAAgAAAAcAAACwAAAAAwAAAAIAAADM" +
+    "AAAABAAAAAIAAADkAAAABQAAAAQAAAD0AAAABgAAAAEAAAAUAQAAASAAAAIAAAA0AQAAAyAAAAIA" +
+    "AAB0AQAAARAAAAEAAACAAQAAAiAAABAAAACGAQAAACAAAAEAAACgAgAAAxAAAAEAAACwAgAAABAA" +
+    "AAEAAAC0AgAA");
 }
diff --git a/test/Android.bp b/test/Android.bp
index e265651..467a717 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -59,8 +59,7 @@
     shared_libs: [
         "libartd",
         "libartd-disassembler",
-        "libvixld-arm",
-        "libvixld-arm64",
+        "libvixld",
         "libart-gtest",
         "libdexfiled",
         "libprofiled",
@@ -291,6 +290,8 @@
         "1946-list-descriptors/descriptors.cc",
         "1950-unprepared-transform/unprepared_transform.cc",
         "1951-monitor-enter-no-suspend/raw_monitor.cc",
+        "1953-pop-frame/pop_frame.cc",
+        "1957-error-ext/lasterror.cc",
     ],
     // Use NDK-compatible headers for ctstiagent.
     header_libs: [
@@ -320,6 +321,8 @@
         "983-source-transform-verify/source_transform_art.cc",
         "1940-ddms-ext/ddm_ext.cc",
         "1944-sudden-exit/sudden_exit.cc",
+        // "1952-pop-frame-jit/pop_frame.cc",
+        "1959-redefine-object-instrument/fake_redef_object.cc",
     ],
     static_libs: [
         "libz",
@@ -417,10 +420,11 @@
 
 art_cc_defaults {
     name: "libtistress-static-defaults",
-    defaults: ["libtistress-srcs"],
-    static_libs: art_static_dependencies + [
-        "slicer",
+    defaults: [
+        "libtistress-srcs",
+        "libart_static_defaults",
     ],
+    static_libs: ["slicer"],
 }
 
 art_cc_test_library {
@@ -453,7 +457,6 @@
         "004-UnsafeTest/unsafe_test.cc",
         "044-proxy/native_proxy.cc",
         "051-thread/thread_test.cc",
-        "117-nopatchoat/nopatchoat.cc",
         "1337-gc-coverage/gc_coverage.cc",
         "136-daemon-jni-shutdown/daemon_jni_shutdown.cc",
         "137-cfi/cfi.cc",
@@ -492,6 +495,7 @@
         "708-jit-cache-churn/jit.cc",
         "800-smali/jni.cc",
         "909-attach-agent/disallow_debugging.cc",
+        "1001-app-image-regions/app_image_regions.cc",
         "1947-breakpoint-redefine-deopt/check_deopt.cc",
         "common/runtime_state.cc",
         "common/stack_inspect.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ffaa2cd..e3157ef 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -27,38 +27,35 @@
 
 # We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
 # only once).
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES := $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
 
 # Also need libartagent.
-TEST_ART_TARGET_SYNC_DEPS += libartagent-target libartagentd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libartagent-target libartagentd-target
 
 # Also need libtiagent.
-TEST_ART_TARGET_SYNC_DEPS += libtiagent-target libtiagentd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libtiagent-target libtiagentd-target
 
 # Also need libtistress.
-TEST_ART_TARGET_SYNC_DEPS += libtistress-target libtistressd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libtistress-target libtistressd-target
 
 # Also need libarttest.
-TEST_ART_TARGET_SYNC_DEPS += libarttest-target libarttestd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libarttest-target libarttestd-target
 
 # Also need libnativebridgetest.
-TEST_ART_TARGET_SYNC_DEPS += libnativebridgetest-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libnativebridgetest-target
 
 # Also need libopenjdkjvmti.
-TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti-target libopenjdkjvmtid-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target
 
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += \
+  $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
 
 # All tests require the host executables. The tests also depend on the core images, but on
 # specific version depending on the compiler.
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
   $(ART_HOST_EXECUTABLES) \
   $(HOST_OUT_EXECUTABLES)/hprof-conv \
+  $(HOST_OUT_EXECUTABLES)/timeout_dumper \
   $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
   $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
   $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistress) \
@@ -73,6 +70,7 @@
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \
+  $(HOST_CORE_DEX_LOCATIONS) \
 
 ifneq ($(HOST_PREFER_32_BIT),true)
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
diff --git a/test/HiddenApi/AbstractPackageClass.java b/test/HiddenApi/AbstractPackageClass.java
new file mode 100644
index 0000000..8f955ca
--- /dev/null
+++ b/test/HiddenApi/AbstractPackageClass.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+abstract class AbstractPackageClass {
+  public void publicMethod2() {}
+}
diff --git a/test/HiddenApi/PackageClass.java b/test/HiddenApi/PackageClass.java
new file mode 100644
index 0000000..eece100
--- /dev/null
+++ b/test/HiddenApi/PackageClass.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class PackageClass extends AbstractPackageClass implements PublicInterface {
+  public void publicMethod1() {}
+}
diff --git a/test/HiddenApi/PublicInterface.java b/test/HiddenApi/PublicInterface.java
new file mode 100644
index 0000000..77a3709
--- /dev/null
+++ b/test/HiddenApi/PublicInterface.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface PublicInterface {
+  void publicMethod1();
+  void publicMethod2();
+}
diff --git a/test/HiddenApiStubs/HiddenApi b/test/HiddenApiStubs/HiddenApi
new file mode 100644
index 0000000..6841ab5
--- /dev/null
+++ b/test/HiddenApiStubs/HiddenApi
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface PublicInterface {
+  void publicMethod();
+}
diff --git a/test/HiddenApiStubs/PublicInterface.java b/test/HiddenApiStubs/PublicInterface.java
new file mode 100644
index 0000000..77a3709
--- /dev/null
+++ b/test/HiddenApiStubs/PublicInterface.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface PublicInterface {
+  void publicMethod1();
+  void publicMethod2();
+}
diff --git a/test/StringLiterals/StringLiterals.java b/test/StringLiterals/StringLiterals.java
new file mode 100644
index 0000000..9ab37ca
--- /dev/null
+++ b/test/StringLiterals/StringLiterals.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class StringLiterals {
+    static class StartupClass {
+        static {
+            System.out.println("Startup init");
+        }
+    }
+
+    static class OtherClass {
+        static {
+            System.out.println("Other class init");
+        }
+    }
+
+    void startUpMethod() {
+        String resource = "abcd.apk";
+        System.out.println("Starting up");
+        System.out.println("Loading " + resource);
+    }
+
+    void otherMethod() {
+        System.out.println("Unexpected error");
+        System.out.println("Shutting down!");
+    }
+}
diff --git a/test/VerifySoftFailDuringClinit/ClassToInitialize.smali b/test/VerifySoftFailDuringClinit/ClassToInitialize.smali
new file mode 100644
index 0000000..0d12ec8
--- /dev/null
+++ b/test/VerifySoftFailDuringClinit/ClassToInitialize.smali
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LClassToInitialize;
+.super Ljava/lang/Object;
+
+.method public static constructor <clinit>()V
+  .registers 0
+  invoke-static {}, LVerifySoftFail;->empty()V
+  return-void
+.end method
diff --git a/test/VerifySoftFailDuringClinit/VerifySoftFail.smali b/test/VerifySoftFailDuringClinit/VerifySoftFail.smali
new file mode 100644
index 0000000..e0f4946
--- /dev/null
+++ b/test/VerifySoftFailDuringClinit/VerifySoftFail.smali
@@ -0,0 +1,27 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LVerifySoftFail;
+.super Ljava/lang/Object;
+
+.method public static empty()V
+  .registers 0
+  return-void
+.end method
+
+.method public static softFail()V
+  .registers 0
+  invoke-static {}, LMissingClass;->test()V
+  return-void
+.end method
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index c9b789e..bbc3039 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -21,11 +21,13 @@
 
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "common_throws.h"
 #include "dex/dex_file-inl.h"
 #include "instrumentation.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/profiling_info.h"
+#include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
 #include "nativehelper/ScopedUtfChars.h"
 #include "oat_file.h"
@@ -159,6 +161,19 @@
   return !interpreter;
 }
 
+static ArtMethod* GetMethod(ScopedObjectAccess& soa, jclass cls, const ScopedUtfChars& chars)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  CHECK(chars.c_str() != nullptr);
+  ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
+        chars.c_str(), kRuntimePointerSize);
+  if (method == nullptr) {
+    method = soa.Decode<mirror::Class>(cls)->FindDeclaredVirtualMethodByName(
+        chars.c_str(), kRuntimePointerSize);
+  }
+  DCHECK(method != nullptr) << "Unable to find method called " << chars.c_str();
+  return method;
+}
+
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledEntrypoint(JNIEnv* env,
                                                                          jclass,
                                                                          jclass cls,
@@ -170,9 +185,7 @@
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
-        chars.c_str(), kRuntimePointerSize);
+  ArtMethod* method = GetMethod(soa, cls, chars);
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
   return jit->GetCodeCache()->ContainsPc(
       Runtime::Current()->GetInstrumentation()->GetCodeForInvoke(method));
@@ -189,12 +202,60 @@
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   ScopedUtfChars chars(env, method_name);
-  CHECK(chars.c_str() != nullptr);
-  ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
-        chars.c_str(), kRuntimePointerSize);
+  ArtMethod* method = GetMethod(soa, cls, chars);
   return jit->GetCodeCache()->ContainsMethod(method);
 }
 
+static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::mutator_lock_) {
+  {
+    ScopedObjectAccess soa(self);
+    if (method->IsNative()) {
+      std::string msg(method->PrettyMethod());
+      msg += ": is native";
+      ThrowIllegalArgumentException(msg.c_str());
+      return;
+    } else if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
+      std::string msg(method->PrettyMethod());
+      msg += ": is not safe to jit!";
+      ThrowIllegalStateException(msg.c_str());
+      return;
+    }
+  }
+  jit::Jit* jit = GetJitIfEnabled();
+  jit::JitCodeCache* code_cache = jit->GetCodeCache();
+  // Update the code cache to make sure the JIT code does not get deleted.
+  // Note: this will apply to all JIT compilations.
+  code_cache->SetGarbageCollectCode(false);
+  while (true) {
+    if (code_cache->WillExecuteJitCode(method)) {
+      break;
+    } else {
+      // Sleep to yield to the compiler thread.
+      usleep(1000);
+      ScopedObjectAccess soa(self);
+      // Make sure there is a profiling info, required by the compiler.
+      ProfilingInfo::Create(self, method, /* retry_allocation */ true);
+      // Will either ensure it's compiled or do the compilation itself.
+      jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false);
+    }
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureMethodJitCompiled(JNIEnv*, jclass, jobject meth) {
+  jit::Jit* jit = GetJitIfEnabled();
+  if (jit == nullptr) {
+    return;
+  }
+
+  Thread* self = Thread::Current();
+  ArtMethod* method;
+  {
+    ScopedObjectAccess soa(self);
+    method = ArtMethod::FromReflectedMethod(soa, meth);
+  }
+  ForceJitCompiled(self, method);
+}
+
 extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
                                                              jclass,
                                                              jclass cls,
@@ -210,33 +271,9 @@
     ScopedObjectAccess soa(self);
 
     ScopedUtfChars chars(env, method_name);
-    CHECK(chars.c_str() != nullptr);
-    method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
-        chars.c_str(), kRuntimePointerSize);
-    if (method == nullptr) {
-      method = soa.Decode<mirror::Class>(cls)->FindDeclaredVirtualMethodByName(
-          chars.c_str(), kRuntimePointerSize);
-    }
-    DCHECK(method != nullptr) << "Unable to find method called " << chars.c_str();
+    method = GetMethod(soa, cls, chars);
   }
-
-  jit::JitCodeCache* code_cache = jit->GetCodeCache();
-  // Update the code cache to make sure the JIT code does not get deleted.
-  // Note: this will apply to all JIT compilations.
-  code_cache->SetGarbageCollectCode(false);
-  while (true) {
-    if (code_cache->WillExecuteJitCode(method)) {
-      break;
-    } else {
-      // Sleep to yield to the compiler thread.
-      usleep(1000);
-      ScopedObjectAccess soa(self);
-      // Make sure there is a profiling info, required by the compiler.
-      ProfilingInfo::Create(self, method, /* retry_allocation */ true);
-      // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, self, /* osr */ false);
-    }
-  }
+  ForceJitCompiled(self, method);
 }
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasSingleImplementation(JNIEnv* env,
@@ -313,4 +350,25 @@
   }
 }
 
+extern "C" JNIEXPORT jint JNICALL Java_Main_getJitThreshold(JNIEnv*, jclass) {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  return (jit != nullptr) ? jit->HotMethodThreshold() : 0;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionJitFromZygote(JNIEnv*, jclass) {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    return;
+  }
+  // Mimic the transition behavior a zygote fork would have.
+  jit->PreZygoteFork();
+  jit->GetCodeCache()->PostForkChildAction(/*is_system_server=*/ false, /*is_zygote=*/ false);
+  jit->PostForkChildAction(/*is_zygote=*/ false);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_deoptimizeBootImage(JNIEnv*, jclass) {
+  ScopedSuspendAll ssa(__FUNCTION__);
+  Runtime::Current()->DeoptimizeBootImage();
+}
+
 }  // namespace art
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index d74d2ef..cb011a8 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/context.h"
 #include "base/mutex.h"
 #include "dex/dex_file-inl.h"
 #include "jni/jni_internal.h"
@@ -66,42 +67,30 @@
 
 // public static native boolean isInterpretedFunction(String smali);
 
-// TODO Remove 'allow_runtime_frames' option once we have deoptimization through runtime frames.
-struct MethodIsInterpretedVisitor : public StackVisitor {
- public:
-  MethodIsInterpretedVisitor(Thread* thread, ArtMethod* goal, bool require_deoptable)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        goal_(goal),
-        method_is_interpreted_(true),
-        method_found_(false),
-        prev_was_runtime_(true),
-        require_deoptable_(require_deoptable) {}
-
-  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (goal_ == GetMethod()) {
-      method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
-      method_found_ = true;
-      return false;
-    }
-    prev_was_runtime_ = GetMethod()->IsRuntimeMethod();
-    return true;
-  }
-
-  bool IsInterpreted() {
-    return method_is_interpreted_;
-  }
-
-  bool IsFound() {
-    return method_found_;
-  }
-
- private:
-  const ArtMethod* goal_;
-  bool method_is_interpreted_;
-  bool method_found_;
-  bool prev_was_runtime_;
-  bool require_deoptable_;
-};
+static bool IsMethodInterpreted(Thread* self,
+                                const ArtMethod* goal,
+                                const bool require_deoptable,
+                                /* out */ bool* method_is_interpreted)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  *method_is_interpreted = true;
+  bool method_found = false;
+  bool prev_was_runtime = true;
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        if (goal == stack_visitor->GetMethod()) {
+          *method_is_interpreted =
+              (require_deoptable && prev_was_runtime) || stack_visitor->IsShadowFrame();
+          method_found = true;
+          return false;
+        }
+        prev_was_runtime = stack_visitor->GetMethod()->IsRuntimeMethod();
+        return true;
+      },
+      self,
+      /* context= */ nullptr,
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return method_found;
+}
 
 // TODO Remove 'require_deoptimizable' option once we have deoptimization through runtime frames.
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
@@ -119,23 +108,18 @@
     env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to interpret method argument!");
     return JNI_FALSE;
   }
-  bool result;
-  bool found;
   {
     ScopedObjectAccess soa(env);
     ArtMethod* goal = jni::DecodeArtMethod(id);
-    MethodIsInterpretedVisitor v(soa.Self(), goal, require_deoptimizable);
-    v.WalkStack();
+    bool is_interpreted;
+    if (!IsMethodInterpreted(soa.Self(), goal, require_deoptimizable, &is_interpreted)) {
+      env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
+      return JNI_FALSE;
+    }
     bool enters_interpreter = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(
         goal->GetEntryPointFromQuickCompiledCode());
-    result = (v.IsInterpreted() || enters_interpreter);
-    found = v.IsFound();
+    return (is_interpreted || enters_interpreter);
   }
-  if (!found) {
-    env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
-    return JNI_FALSE;
-  }
-  return result;
 }
 
 // public static native void assertIsInterpreted();
@@ -196,4 +180,24 @@
   }
 }
 
+extern "C" JNIEXPORT jobject JNICALL Java_Main_getThisOfCaller(
+    JNIEnv* env, jclass cls ATTRIBUTE_UNUSED) {
+  ScopedObjectAccess soa(env);
+  std::unique_ptr<art::Context> context(art::Context::Create());
+  jobject result = nullptr;
+  StackVisitor::WalkStack(
+      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+        // Discard stubs and Main.getThisOfCaller.
+        if (stack_visitor->GetMethod() == nullptr || stack_visitor->GetMethod()->IsNative()) {
+          return true;
+        }
+        result = soa.AddLocalReference<jobject>(stack_visitor->GetThisObject());
+        return false;
+      },
+      soa.Self(),
+      context.get(),
+      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+  return result;
+}
+
 }  // namespace art
diff --git a/test/etc/default-build b/test/etc/default-build
index 8542ad0..d203698 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -80,6 +80,12 @@
   HAS_JASMIN_MULTIDEX=false
 fi
 
+if [ -d smali-ex ]; then
+  HAS_SMALI_EX=true
+else
+  HAS_SMALI_EX=false
+fi
+
 if [ -d src-ex ]; then
   HAS_SRC_EX=true
 else
@@ -92,7 +98,7 @@
   HAS_SRC_DEX2OAT_UNRESOLVED=false
 fi
 
-if [ -f api-light-greylist.txt -o -f api-dark-greylist.txt -o -f api-blacklist.txt ]; then
+if [ -f hiddenapi-flags.csv ]; then
   HAS_HIDDENAPI_SPEC=true
 else
   HAS_HIDDENAPI_SPEC=false
@@ -323,18 +329,12 @@
 function make_hiddenapi() {
   local args=( "encode" )
   while [[ $# -gt 0 ]]; do
-    args+=("--dex=$1")
+    args+=("--input-dex=$1")
+    args+=("--output-dex=$1")
     shift
   done
-  if [ -f api-light-greylist.txt ]; then
-    args+=("--light-greylist=api-light-greylist.txt")
-  fi
-  if [ -f api-dark-greylist.txt ]; then
-    args+=("--dark-greylist=api-dark-greylist.txt")
-  fi
-  if [ -f api-blacklist.txt ]; then
-    args+=("--blacklist=api-blacklist.txt")
-  fi
+  args+=("--api-flags=hiddenapi-flags.csv")
+  args+=("--no-force-assign-all")
   ${HIDDENAPI} "${args[@]}"
 }
 
@@ -460,13 +460,25 @@
     javac_with_bootclasspath -d classes-tmp-for-ex `find src-art -name '*.java'`
     src_tmp_for_ex="-cp classes-tmp-for-ex"
   fi
-  mkdir classes-ex
+  mkdir -p classes-ex
   javac_with_bootclasspath -d classes-ex $src_tmp_for_ex `find src-ex -name '*.java'`
 fi
 
 if [[ -d classes-ex ]] && [ ${NEED_DEX} = "true" ]; then
   make_dex classes-ex
+fi
 
+if [ "${HAS_SMALI_EX}" = "true" -a ${NEED_DEX} = "true" ]; then
+  # Compile Smali classes
+  ${SMALI} -JXmx512m assemble ${SMALI_ARGS} --output smali_classes-ex.dex `find smali-ex -name '*.smali'`
+  if [[ ! -s smali_classes-ex.dex ]] ; then
+    fail "${SMALI} produced no output."
+  fi
+  # Merge smali files into classes-ex.dex.
+  make_dexmerge classes-ex.dex smali_classes-ex.dex
+fi
+
+if [[ -f classes-ex.dex ]]; then
   # Apply hiddenapi on the dex files if the test has API list file(s).
   if [ ${USE_HIDDENAPI} = "true" -a ${HAS_HIDDENAPI_SPEC} = "true" ]; then
     make_hiddenapi classes-ex.dex
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index bd58ae3..660c971 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -9,6 +9,7 @@
 }
 
 ANDROID_ROOT="/system"
+ANDROID_RUNTIME_ROOT="/apex/com.android.runtime"
 ARCHITECTURES_32="(arm|x86|mips|none)"
 ARCHITECTURES_64="(arm64|x86_64|mips64|none)"
 ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
@@ -17,11 +18,12 @@
 COMPILE_FLAGS=""
 DALVIKVM="dalvikvm32"
 DEBUGGER="n"
-WITH_AGENT=""
+WITH_AGENT=()
 DEBUGGER_AGENT=""
 WRAP_DEBUGGER_AGENT="n"
 DEV_MODE="n"
-DEX2OAT=""
+DEX2OAT_NDEBUG_BINARY="dex2oat"
+DEX2OAT_DEBUG_BINARY="dex2oatd"
 EXPERIMENTAL=""
 FALSE_BIN="false"
 FLAGS=""
@@ -31,6 +33,10 @@
 GDB_SERVER="gdbserver"
 HAVE_IMAGE="y"
 HOST="n"
+BIONIC="n"
+CREATE_ANDROID_ROOT="n"
+USE_ZIPAPEX="n"
+ZIPAPEX_LOC=""
 INTERPRETER="n"
 JIT="n"
 INVOKE_WITH=""
@@ -40,13 +46,13 @@
 TEST_DIRECTORY="nativetest"
 MAIN=""
 OPTIMIZE="y"
-PATCHOAT=""
 PREBUILD="y"
 QUIET="n"
 RELOCATE="n"
 STRIP_DEX="n"
 SECONDARY_DEX=""
 TIME_OUT="gdb"  # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+TIMEOUT_DUMPER=timeout_dumper
 # Value in seconds
 if [ "$ART_USE_READ_BARRIER" != "false" ]; then
   TIME_OUT_VALUE=2400  # 40 minutes.
@@ -59,7 +65,6 @@
 VERIFY="y" # y=yes,n=no,s=softfail
 ZYGOTE=""
 DEX_VERIFY=""
-USE_PATCHOAT="y"
 INSTRUCTION_SET_FEATURES=""
 ARGS=""
 EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
@@ -166,10 +171,6 @@
         shift
         BOOT_IMAGE="$1"
         shift
-    elif [ "x$1" = "x--no-patchoat" ]; then
-        PATCHOAT="-Xpatchoat:${FALSE_BIN}"
-        USE_PATCHOAT="n"
-        shift
     elif [ "x$1" = "x--relocate" ]; then
         RELOCATE="y"
         shift
@@ -213,7 +214,25 @@
         shift
     elif [ "x$1" = "x--host" ]; then
         HOST="y"
-        ANDROID_ROOT="$ANDROID_HOST_OUT"
+        ANDROID_ROOT="${ANDROID_HOST_OUT}"
+        ANDROID_RUNTIME_ROOT="${ANDROID_HOST_OUT}/com.android.runtime"
+        shift
+    elif [ "x$1" = "x--bionic" ]; then
+        BIONIC="y"
+        # We need to create an ANDROID_ROOT because currently we cannot create
+        # the frameworks/libcore with linux_bionic so we need to use the normal
+        # host ones which are in a different location.
+        CREATE_ANDROID_ROOT="y"
+        shift
+    elif [ "x$1" = "x--runtime-zipapex" ]; then
+        shift
+        USE_ZIPAPEX="y"
+        ZIPAPEX_LOC="$1"
+        # TODO (b/119942078): Currently apex does not support
+        # symlink_preferred_arch so we will not have a dex2oatd to execute and
+        # need to manually provide
+        # dex2oatd64.
+        DEX2OAT_DEBUG_BINARY="dex2oatd64"
         shift
     elif [ "x$1" = "x--no-prebuild" ]; then
         PREBUILD="n"
@@ -232,7 +251,7 @@
     elif [ "x$1" = "x--with-agent" ]; then
         shift
         USE_JVMTI="y"
-        WITH_AGENT="$1"
+        WITH_AGENT+=("$1")
         shift
     elif [ "x$1" = "x--debug-wrap-agent" ]; then
         WRAP_DEBUGGER_AGENT="y"
@@ -271,6 +290,9 @@
     elif [ "x$1" = "x--jit" ]; then
         JIT="y"
         shift
+    elif [ "x$1" = "x--baseline" ]; then
+        FLAGS="${FLAGS} -Xcompiler-option --baseline"
+        shift
     elif [ "x$1" = "x--jvm" ]; then
         USE_JVM="y"
         shift
@@ -303,6 +325,10 @@
         shift
         ANDROID_ROOT="$1"
         shift
+    elif [ "x$1" = "x--android-runtime-root" ]; then
+        shift
+        ANDROID_RUNTIME_ROOT="$1"
+        shift
     elif [ "x$1" = "x--instruction-set-features" ]; then
         shift
         INSTRUCTION_SET_FEATURES="$1"
@@ -322,10 +348,6 @@
         TEST_DIRECTORY="nativetest64"
         ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
         shift
-    elif [ "x$1" = "x--pic-test" ]; then
-        FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
-        COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
-        shift
     elif [ "x$1" = "x--experimental" ]; then
         if [ "$#" -lt 2 ]; then
             echo "missing --experimental option" 1>&2
@@ -381,6 +403,10 @@
     done
 fi
 
+if [ "$CREATE_ANDROID_ROOT" = "y" ]; then
+    ANDROID_ROOT=$DEX_LOCATION/android-root
+fi
+
 if [ "x$1" = "x" ] ; then
   MAIN="Main"
 else
@@ -454,9 +480,9 @@
   DEBUGGER_OPTS="-agentpath:${AGENTPATH}=transport=dt_socket,address=$PORT,server=y,suspend=y"
 fi
 
-if [ "x$WITH_AGENT" != "x" ]; then
-  FLAGS="${FLAGS} -agentpath:${WITH_AGENT}"
-fi
+for agent in "${WITH_AGENT[@]}"; do
+  FLAGS="${FLAGS} -agentpath:${agent}"
+done
 
 if [ "$USE_JVMTI" = "y" ]; then
   if [ "$USE_JVM" = "n" ]; then
@@ -533,23 +559,41 @@
   exit
 fi
 
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+bpath_modules="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+if [ "${HOST}" = "y" ]; then
+    framework="${ANDROID_HOST_OUT}/framework"
+    if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
+      framework_location="${ANDROID_HOST_OUT:${#ANDROID_BUILD_TOP}+1}/framework"
+    else
+      echo "error: ANDROID_BUILD_TOP/ is not a prefix of ANDROID_HOST_OUT"
+      echo "ANDROID_BUILD_TOP=${ANDROID_BUILD_TOP}"
+      echo "ANDROID_HOST_OUT=${ANDROID_HOST_OUT}"
+      exit
+    fi
+    bpath_suffix="-hostdex"
+else
+    framework="${ANDROID_ROOT}/framework"
+    framework_location="${ANDROID_ROOT}/framework"
+    bpath_suffix="-testdex"
+fi
+bpath=""
+bpath_locations=""
+bpath_separator=""
+for bpath_module in ${bpath_modules}; do
+  bpath+="${bpath_separator}${framework}/${bpath_module}${bpath_suffix}.jar"
+  bpath_locations+="${bpath_separator}${framework_location}/${bpath_module}${bpath_suffix}.jar"
+  bpath_separator=":"
+done
+# Pass down the bootclasspath
+FLAGS="${FLAGS} -Xbootclasspath:${bpath}"
+FLAGS="${FLAGS} -Xbootclasspath-locations:${bpath_locations}"
+COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xbootclasspath:${bpath}"
+COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xbootclasspath-locations:${bpath_locations}"
 
 if [ "$HAVE_IMAGE" = "n" ]; then
-    if [ "${HOST}" = "y" ]; then
-        framework="${ANDROID_HOST_OUT}/framework"
-        bpath_suffix="-hostdex"
-    else
-        framework="${ANDROID_ROOT}/framework"
-        bpath_suffix="-testdex"
-    fi
-    bpath="${framework}/core-libart${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/core-simple${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
-    # Pass down the bootclasspath
-    FLAGS="${FLAGS} -Xbootclasspath:${bpath}"
     # Disable image dex2oat - this will forbid the runtime to patch or compile an image.
     FLAGS="${FLAGS} -Xnoimage-dex2oat"
 
@@ -651,6 +695,17 @@
   fi
 fi
 
+if [ "$BIONIC" = "y" ]; then
+  # This is the location that soong drops linux_bionic builds. Despite being
+  # called linux_bionic-x86 the build is actually amd64 (x86_64) only.
+  if [ ! -e "$OUT_DIR/soong/host/linux_bionic-x86" ]; then
+    echo "linux_bionic-x86 target doesn't seem to have been built!" >&2
+    exit 1
+  fi
+  # Set timeout_dumper manually so it works even with apexes
+  TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/timeout_dumper
+fi
+
 # Prevent test from silently falling back to interpreter in no-prebuild mode. This happens
 # when DEX_LOCATION path is too long, because vdex/odex filename is constructed by taking
 # full path to dex, stripping leading '/', appending '@classes.vdex' and changing every
@@ -671,6 +726,8 @@
     exit 1
 fi
 
+BIN_DIR=$ANDROID_ROOT/bin
+
 profman_cmdline="true"
 dex2oat_cmdline="true"
 vdex_cmdline="true"
@@ -678,11 +735,41 @@
 mkdir_locations="${DEX_LOCATION}/dalvik-cache/$ISA"
 strip_cmdline="true"
 sync_cmdline="true"
+linkroot_cmdline="true"
+linkroot_overlay_cmdline="true"
+setupapex_cmdline="true"
+installapex_cmdline="true"
+
+linkdirs() {
+  find "$1" -maxdepth 1 -mindepth 1 -type d | xargs -i ln -sf '{}' "$2"
+}
+
+if [ "$CREATE_ANDROID_ROOT" = "y" ]; then
+  mkdir_locations="${mkdir_locations} ${ANDROID_ROOT}"
+  linkroot_cmdline="linkdirs ${ANDROID_HOST_OUT} ${ANDROID_ROOT}"
+  if [ "${BIONIC}" = "y" ]; then
+    # TODO Make this overlay more generic.
+    linkroot_overlay_cmdline="linkdirs $OUT_DIR/soong/host/linux_bionic-x86 ${ANDROID_ROOT}"
+  fi
+fi
+
+if [ "$USE_ZIPAPEX" = "y" ]; then
+  # TODO Currently this only works for linux_bionic zipapexes because those are
+  # stripped and so small enough that the ulimit doesn't kill us.
+  mkdir_locations="${mkdir_locations} $DEX_LOCATION/zipapex"
+  zip_options="-qq"
+  if [ "$DEV_MODE" = "y" ]; then
+    zip_options=""
+  fi
+  setupapex_cmdline="unzip -o -u ${zip_options} ${ZIPAPEX_LOC} apex_payload.zip -d ${DEX_LOCATION}"
+  installapex_cmdline="unzip -o -u ${zip_options} ${DEX_LOCATION}/apex_payload.zip -d ${DEX_LOCATION}/zipapex"
+  BIN_DIR=$DEX_LOCATION/zipapex/bin
+fi
 
 # PROFILE takes precedence over RANDOM_PROFILE, since PROFILE tests require a
 # specific profile to run properly.
 if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
-  profman_cmdline="${ANDROID_ROOT}/bin/profman  \
+  profman_cmdline="$BIN_DIR/profman  \
     --apk=$DEX_LOCATION/$TEST_NAME.jar \
     --dex-location=$DEX_LOCATION/$TEST_NAME.jar"
   if [ -f $DEX_LOCATION/$TEST_NAME-ex.jar ]; then
@@ -708,11 +795,11 @@
     app_image="--base=0x4000 --app-image-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.art"
   fi
 
-  dex2oat_binary=dex2oatd
+  dex2oat_binary=${DEX2OAT_DEBUG_BINARY}
   if  [[ "$TEST_IS_NDEBUG" = "y" ]]; then
-    dex2oat_binary=dex2oat
+    dex2oat_binary=${DEX2OAT_NDEBUG_BINARY}
   fi
-  dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/$dex2oat_binary \
+  dex2oat_cmdline="$INVOKE_WITH $BIN_DIR/$dex2oat_binary \
                       $COMPILE_FLAGS \
                       --boot-image=${BOOT_IMAGE} \
                       --dex-file=$DEX_LOCATION/$TEST_NAME.jar \
@@ -769,12 +856,11 @@
 # We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
 # b/27185632
 # b/24664297
-dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
+dalvikvm_cmdline="$INVOKE_WITH $GDB $BIN_DIR/$DALVIKVM \
                   $GDB_ARGS \
                   $FLAGS \
                   $DEX_VERIFY \
                   -XXlib:$LIB \
-                  $PATCHOAT \
                   $DEX2OAT \
                   $DALVIKVM_ISA_FEATURES_ARGS \
                   $ZYGOTE \
@@ -803,15 +889,11 @@
 fi
 RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}detect_leaks=0"
 
-# For running, we must turn off logging when dex2oat or patchoat are missing. Otherwise we use
+# For running, we must turn off logging when dex2oat is missing. Otherwise we use
 # the same defaults as for prebuilt: everything when --dev, otherwise errors and above only.
 if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then
   if [ "$DEV_MODE" = "y" ]; then
       export ANDROID_LOG_TAGS='*:d'
-  elif [ "$USE_PATCHOAT" = "n" ]; then
-      # All tests would log the error of failing dex2oat/patchoat. Be silent here and only
-      # log fatal events.
-      export ANDROID_LOG_TAGS='*:s'
   elif [ "$HAVE_IMAGE" = "n" ]; then
       # All tests would log the error of missing image. Be silent here and only log fatal
       # events.
@@ -877,11 +959,12 @@
              export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \
              export DEX_LOCATION=$DEX_LOCATION && \
              export ANDROID_ROOT=$ANDROID_ROOT && \
+             export ANDROID_RUNTIME_ROOT=$ANDROID_RUNTIME_ROOT && \
              export ANDROID_LOG_TAGS=$ANDROID_LOG_TAGS && \
              rm -rf ${DEX_LOCATION}/dalvik-cache/ && \
              mkdir -p ${mkdir_locations} && \
              export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
-             export PATH=$ANDROID_ROOT/bin:$PATH && \
+             export PATH=$BIN_DIR:$PATH && \
              $profman_cmdline && \
              $dex2oat_cmdline && \
              $dm_cmdline && \
@@ -921,9 +1004,14 @@
 
     export ANDROID_DATA="$DEX_LOCATION"
     export ANDROID_ROOT="${ANDROID_ROOT}"
+    export ANDROID_RUNTIME_ROOT="${ANDROID_RUNTIME_ROOT}"
     export LD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
+    if [ "$USE_ZIPAPEX" = "y" ]; then
+      # Put the zipapex files in front of the ld-library-path
+      export LD_LIBRARY_PATH="${ANDROID_DATA}/zipapex/${LIBRARY_DIRECTORY}:${LD_LIBRARY_PATH}"
+    fi
     export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
-    export PATH="$PATH:${ANDROID_ROOT}/bin"
+    export PATH="$PATH:$BIN_DIR"
 
     # Temporarily disable address space layout randomization (ASLR).
     # This is needed on the host so that the linker loads core.oat at the necessary address.
@@ -954,14 +1042,16 @@
       # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
       #       before abort. However, dumping threads might deadlock, so we also use the "-k"
       #       option to definitely kill the child.
-      cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
+      # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime.
+      cmdline="timeout --foreground -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
     fi
 
     if [ "$DEV_MODE" = "y" ]; then
       for var in ANDROID_PRINTF_LOG ANDROID_DATA ANDROID_ROOT LD_LIBRARY_PATH DYLD_LIBRARY_PATH PATH LD_USE_LOAD_BIAS; do
         echo EXPORT $var=${!var}
       done
-      echo "mkdir -p ${mkdir_locations} && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
+      echo "$(declare -f linkdirs)"
+      echo "mkdir -p ${mkdir_locations} && $setupapex_cmdline && $installapex_cmdline && $linkroot_cmdline && $linkroot_overlay_cmdline && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
     fi
 
     cd $ANDROID_BUILD_TOP
@@ -975,6 +1065,10 @@
     export ASAN_OPTIONS=$RUN_TEST_ASAN_OPTIONS
 
     mkdir -p ${mkdir_locations} || exit 1
+    $setupapex_cmdline || { echo "zipapex extraction failed." >&2 ; exit 2; }
+    $installapex_cmdline || { echo "zipapex install failed. cmd was: ${installapex_cmdline}." >&2; find ${mkdir_locations} -type f >&2; exit 2; }
+    $linkroot_cmdline || { echo "create symlink android-root failed." >&2 ; exit 2; }
+    $linkroot_overlay_cmdline || { echo "overlay android-root failed." >&2 ; exit 2; }
     $profman_cmdline || { echo "Profman failed." >&2 ; exit 2; }
     $dex2oat_cmdline || { echo "Dex2oat failed." >&2 ; exit 2; }
     $dm_cmdline || { echo "Dex2oat failed." >&2 ; exit 2; }
@@ -988,6 +1082,7 @@
 
     if [ "$USE_GDB" = "y" ]; then
       # When running under gdb, we cannot do piping and grepping...
+      echo "Run 'gdbclient.py -p <pid printed below>' to debug."
       $cmdline "$@"
     else
       if [ "$TIME_OUT" != "gdb" ]; then
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 05c9aa9..cf6e69c 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -6,6 +6,12 @@
         "bug": "http://b/33389022"
     },
     {
+        "tests": "132-daemon-locks-shutdown",
+        "description": ["This test seems to fail occasionally on redefine-stress for unknown reasons without stack-traces"],
+        "variant": "redefine-stress",
+        "bug": "http://b/121302864"
+    },
+    {
         "tests": "579-inline-infinite",
         "description": ["This test seems to fail often on redefine-stress for unknown reasons"],
         "variant": "redefine-stress",
@@ -13,7 +19,17 @@
     },
     {
         "tests": "080-oom-fragmentation",
-        "description": "Disable 080-oom-fragmentation due to flakes.",
+        "description": ["Disable 080-oom-fragmentation for GSS GC due to lack of",
+                        "support for allocations larger than 32MB."],
+        "env_vars": {"ART_DEFAULT_GC_TYPE": "GSS"},
+        "bug": "http://b/33795328"
+    },
+    {
+        "tests": "080-oom-fragmentation",
+        "description": ["Disable 080-oom-fragmentation for CC collector in debug mode",
+                        "because of potential fragmentation caused by the region space's",
+                        "cyclic region allocation (which is enabled in debug mode)."],
+        "variant": "debug",
         "bug": "http://b/33795328"
     },
     {
@@ -60,23 +76,22 @@
                         "doesn't (and isn't meant to) work with --prebuild."]
     },
     {
-        "tests": ["117-nopatchoat",
-                  "147-stripped-dex-fallback",
+        "tests": ["147-stripped-dex-fallback",
                   "608-checker-unresolved-lse"],
         "variant": "no-prebuild"
     },
     {
-        "tests": ["117-nopatchoat",
-                  "118-noimage-dex2oat",
-                  "119-noimage-patchoat"],
+        "tests": ["118-noimage-dex2oat",
+                  "1001-app-image-regions"],
         "variant": "no-relocate",
-        "description": ["117-nopatchoat is not broken per-se it just doesn't",
-                        "work (and isn't meant to) without --prebuild",
-                        "--relocate"]
+        "description": ["118-noimage-dex2oat is not broken per-se it just ",
+                        "doesn't work (and isn't meant to) without --prebuild ",
+                        "--relocate. 1001-app-image-regions is disabled since it",
+                        "doesn't have the app image loaded for no-relocate"]
     },
     {
         "tests" : "629-vdex-speed",
-        "variant": "interp-ac | interpreter | jit | relocate-npatchoat",
+        "variant": "interp-ac | interpreter | jit",
         "description": "629 requires compilation."
     },
     {
@@ -163,20 +178,18 @@
     },
     {
         "tests": "147-stripped-dex-fallback",
-        "variant": "no-image | relocate-npatchoat",
+        "variant": "no-image",
         "description": ["147-stripped-dex-fallback is disabled because it",
                         "requires --prebuild."]
     },
     {
         "tests": ["116-nodex2oat",
-                  "117-nopatchoat",
                   "118-noimage-dex2oat",
-                  "119-noimage-patchoat",
                   "137-cfi",
                   "138-duplicate-classes-check2"],
-        "variant": "no-image | relocate-npatchoat",
+        "variant": "no-image",
         "description": ["All these tests check that we have sane behavior if we",
-                        "don't have a patchoat or dex2oat. Therefore we",
+                        "don't have a dex2oat. Therefore we",
                         "shouldn't run them in situations where we actually",
                         "don't have these since they explicitly test for them.",
                         "These all also assume we have an image."]
@@ -286,10 +299,11 @@
     {
         "tests": ["454-get-vreg",
                   "457-regs",
-                  "602-deoptimizeable"],
+                  "602-deoptimizeable",
+                  "685-deoptimizeable"],
         "description": ["Tests that should fail when the optimizing compiler ",
                         "compiles them non-debuggable."],
-        "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable | speed-profile & ndebuggable | jit & ndebuggable"
+        "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable | speed-profile & ndebuggable | jit & ndebuggable | jit-on-first-use & ndebuggable"
     },
     {
         "tests": "596-app-images",
@@ -331,9 +345,7 @@
     {
         "tests": ["018-stack-overflow",
                   "116-nodex2oat",
-                  "117-nopatchoat",
                   "118-noimage-dex2oat",
-                  "119-noimage-patchoat",
                   "126-miranda-multidex",
                   "137-cfi"],
         "description": "The test run dalvikvm more than once.",
@@ -443,19 +455,63 @@
     },
     {
         "tests": [
-            "137-cfi",
-            "595-profile-saving",
-            "900-hello-plugin",
-            "909-attach-agent",
-            "981-dedup-original-dex",
-            "1900-track-alloc"
+            "004-ThreadStress",
+            "130-hprof",
+            "579-inline-infinite",
+            "1946-list-descriptors"
         ],
-        "description": ["Tests that require exact knowledge of the number of plugins and agents."],
+        "description": ["Too slow to finish in the timeout"],
         "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
     },
     {
         "tests": [
+            "911-get-stack-trace"
+        ],
+        "description": ["Tests that fail when run with step-stress for unknown reasons."],
+        "bug": "b/120995005",
+        "variant": "jvmti-stress | step-stress"
+    },
+    {
+        "tests": [
+            "004-SignalTest",
+            "004-StackWalk",
+            "064-field-access",
+            "083-compiler-regressions",
+            "098-ddmc",
+            "107-int-math2",
+            "129-ThreadGetId",
+            "135-MirandaDispatch",
             "132-daemon-locks-shutdown",
+            "163-app-image-methods",
+            "607-daemon-stress",
+            "674-hiddenapi",
+            "687-deopt",
+            "904-object-allocation"
+        ],
+        "description": ["Tests that sometimes fail when run with jvmti-stress for unknown reasons."],
+        "bug": "b/120995005",
+        "variant": "jvmti-stress | trace-stress | field-stress | step-stress"
+    },
+    {
+        "tests": [
+            "018-stack-overflow",
+            "137-cfi",
+            "595-profile-saving",
+            "597-deopt-busy-loop",
+            "597-deopt-new-string",
+            "660-clinit",
+            "900-hello-plugin",
+            "909-attach-agent",
+            "924-threads",
+            "981-dedup-original-dex",
+            "1900-track-alloc"
+        ],
+        "description": ["Tests that require exact knowledge of the deoptimization state, the ",
+                        "number of plugins and agents, or breaks other openjdkjvmti assumptions."],
+        "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
+    },
+    {
+        "tests": [
             "607-daemon-stress",
             "602-deoptimizeable",
             "121-simple-suspend-check",
@@ -478,8 +534,9 @@
             "097-duplicate-method",
             "138-duplicate-classes-check2",
             "159-app-image-fields",
-            "674-hiddenapi",
             "649-vdex-duplicate-method",
+            "674-hiddenapi",
+            "690-hiddenapi-same-name-methods",
             "804-class-extends-itself",
             "921-hello-failure",
             "999-redefine-hiddenapi"
@@ -499,6 +556,7 @@
             "629-vdex-speed",
             "647-jni-get-field-id",
             "674-hiddenapi",
+            "690-hiddenapi-same-name-methods",
             "944-transform-classloaders",
             "999-redefine-hiddenapi"
         ],
@@ -565,6 +623,12 @@
         "env_vars": {"SANITIZE_HOST": "address"}
     },
     {
+        "tests": "175-alloc-big-bignums",
+        "description": "ASAN runs out of memory due to huge allocations.",
+        "variant": "host",
+        "env_vars": {"SANITIZE_HOST": "address"}
+    },
+    {
         "tests": "202-thread-oome",
         "description": "ASAN aborts when large thread stacks are requested.",
         "variant": "host",
@@ -697,9 +761,14 @@
         "description": ["Tests that depend on input-vdex are not supported with compact dex"]
     },
     {
-        "tests": ["661-oat-writer-layout", "004-StackWalk"],
-        "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
-        "description": ["Tests are designed to only check --optimizing"]
+        "tests": ["661-oat-writer-layout"],
+        "variant": "interp-ac | interpreter | jit | jit-on-first-use | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
+        "description": ["Test is designed to only check --optimizing"]
+    },
+    {
+        "tests": ["004-StackWalk"],
+        "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress | debuggable",
+        "description": ["Test is designed to only check --optimizing"]
     },
     {
         "tests": "674-HelloWorld-Dm",
@@ -755,9 +824,7 @@
           "111-unresolvable-exception",
           "115-native-bridge",
           "116-nodex2oat",
-          "117-nopatchoat",
           "118-noimage-dex2oat",
-          "119-noimage-patchoat",
           "127-checker-secondarydex",
           "129-ThreadGetId",
           "130-hprof",
@@ -938,6 +1005,10 @@
           "675-checker-unverified-method",
           "676-proxy-jit-at-first-use",
           "676-resolve-field-type",
+          "685-deoptimizeable",
+          "685-shifts",
+          "686-get-this",
+          "687-deopt",
           "706-checker-scheduler",
           "707-checker-invalid-profile",
           "714-invoke-custom-lambda-metafactory",
@@ -1009,14 +1080,20 @@
         "description": ["Failing on RI. Needs further investigating."]
     },
     {
-        "tests": ["616-cha-unloading",
+        "tests": ["530-checker-peel-unroll",
+                  "616-cha-unloading",
                   "674-hiddenapi",
                   "677-fsi2",
                   "678-quickening",
                   "679-locks",
+                  "688-shared-library",
+                  "690-hiddenapi-same-name-methods",
                   "999-redefine-hiddenapi",
                   "1000-non-moving-space-stress",
-                  "1951-monitor-enter-no-suspend"],
+                  "1001-app-image-regions",
+                  "1339-dead-reference-safe",
+                  "1951-monitor-enter-no-suspend",
+                  "1957-error-ext"],
         "variant": "jvm",
         "description": ["Doesn't run on RI."]
     },
@@ -1034,7 +1111,7 @@
     },
     {
         "tests": "677-fsi",
-        "variant": "no-image | no-prebuild | relocate-npatchoat | jvm",
+        "variant": "no-image | no-prebuild | jvm",
         "description": ["Test requires a successful dex2oat invocation"]
     },
     {
@@ -1077,5 +1154,44 @@
         "tests": ["566-polymorphic-inlining"],
         "variant": "jit & debuggable",
         "description": ["We do not inline with debuggable."]
+    },
+    {
+        "tests": ["1955-pop-frame-jit-called", "1956-pop-frame-jit-calling"],
+        "variant": "jit-on-first-use",
+        "description": [
+          "These tests directly set -Xjitthreshold:1000 to prevent the jit from compiling any",
+          "extra methods. jit-at-first-use would disrupt this."
+        ]
+    },
+    {
+        "tests": ["135-MirandaDispatch"],
+        "variant": "interp-ac & 32 & host",
+        "env_vars": {"SANITIZE_HOST": "address"},
+        "bug": "b/112993554",
+        "description": ["Timeout with ASan and interp-ac on 32-bit host (x86)."]
+    },
+    {
+        "tests": ["454-get-vreg", "457-regs"],
+        "variant": "baseline",
+        "description": ["Tests are expected to fail with baseline."]
+    },
+    {
+        "tests": ["1339-dead-reference-safe"],
+        "variant": "debuggable",
+        "description": [ "Fails to eliminate dead reference when debuggable." ]
+    },
+    {
+        "tests": ["708-jit-cache-churn"],
+        "variant": "jit-on-first-use",
+        "bug": "b/120112467",
+        "description": [ "Fails on Android Build hosts with uncaught std::bad_alloc." ]
+    },
+    {
+        "tests": ["719-dm-verify-redefinition"],
+        "variant": "jvm | speed-profile | interp-ac | target | no-prebuild",
+        "description": ["Doesn't run on RI because of boot class redefinition.",
+                        "Doesn't work with profiles because the run-test is not setup to",
+                        "support both. It also needs full verification, so no interp-ac.",
+                        "Requires zip, which isn't available on device"]
     }
 ]
diff --git a/test/run-test b/test/run-test
index ef17302..67bcce7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -76,11 +76,20 @@
     export ANDROID_BUILD_TOP=$oldwd
 fi
 
+# OUT_DIR defaults to out, and may be relative to $ANDROID_BUILD_TOP.
+# Convert it to an absolute path, since we cd into the tmp_dir to run the tests.
+export OUT_DIR=${OUT_DIR:-out}
+if [[ "$OUT_DIR" != /* ]]; then
+    export OUT_DIR=$ANDROID_BUILD_TOP/$OUT_DIR
+fi
+
 # ANDROID_HOST_OUT is not set in a build environment.
 if [ -z "$ANDROID_HOST_OUT" ]; then
-    export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
+    export ANDROID_HOST_OUT=${OUT_DIR}/host/linux-x86
 fi
 
+host_lib_root=${ANDROID_HOST_OUT}
+
 # Allow changing DESUGAR script to something else, or to disable it with DESUGAR=false.
 if [ -z "$DESUGAR" ]; then
   export DESUGAR="$ANDROID_BUILD_TOP/art/tools/desugar.sh"
@@ -148,15 +157,21 @@
 strace="false"
 always_clean="no"
 never_clean="no"
-have_patchoat="yes"
 have_image="yes"
-multi_image_suffix=""
 android_root="/system"
 bisection_search="no"
 suspend_timeout="500000"
-# By default we will use optimizing.
-image_args=""
 image_suffix=""
+run_optimizing="false"
+
+# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
+# ART output to approximately 128MB. This should be more than sufficient
+# for any test while still catching cases of runaway output.
+# Set a hard limit to encourage ART developers to increase the ulimit here if
+# needed to support a test case rather than resetting the limit in the run
+# script for the particular test in question. Adjust this if needed for
+# particular configurations.
+file_ulimit=128000
 
 while true; do
     if [ "x$1" = "x--host" ]; then
@@ -180,7 +195,6 @@
         target_mode="no"
         DEX_LOCATION="$tmp_dir"
         runtime="jvm"
-        image_args=""
         prebuild_mode="no"
         NEED_DEX="false"
         run_args="${run_args} --jvm"
@@ -194,18 +208,9 @@
         lib="libdvm.so"
         runtime="dalvik"
         shift
-    elif [ "x$1" = "x--no-patchoat" ]; then
-        have_patchoat="no"
-        shift
     elif [ "x$1" = "x--no-image" ]; then
         have_image="no"
         shift
-    elif [ "x$1" = "x--multi-image" ]; then
-        multi_image_suffix="-multi"
-        shift
-    elif [ "x$1" = "x--pic-test" ]; then
-        run_args="${run_args} --pic-test"
-        shift
     elif [ "x$1" = "x--relocate" ]; then
         relocate="yes"
         shift
@@ -313,17 +318,20 @@
         image_suffix="-interpreter"
         shift
     elif [ "x$1" = "x--jit" ]; then
-        image_args="--jit"
+        run_args="${run_args} --jit"
         image_suffix="-interpreter"
         shift
+    elif [ "x$1" = "x--baseline" ]; then
+        run_args="${run_args} --baseline"
+        shift
     elif [ "x$1" = "x--optimizing" ]; then
-        image_args="-Xcompiler-option --compiler-backend=Optimizing"
+        run_optimizing="true"
         shift
     elif [ "x$1" = "x--no-verify" ]; then
         run_args="${run_args} --no-verify"
         shift
     elif [ "x$1" = "x--verify-soft-fail" ]; then
-        image_args="--verify-soft-fail"
+        run_args="${run_args} --verify-soft-fail"
         image_suffix="-interp-ac"
         shift
     elif [ "x$1" = "x--no-optimize" ]; then
@@ -378,6 +386,15 @@
         android_root="$1"
         run_args="${run_args} --android-root $1"
         shift
+    elif [ "x$1" = "x--android-runtime-root" ]; then
+        shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --android-runtime-root" 1>&2
+            usage="yes"
+            break
+        fi
+        run_args="${run_args} --android-runtime-root $1"
+        shift
     elif [ "x$1" = "x--update" ]; then
         update_mode="yes"
         shift
@@ -388,6 +405,26 @@
         run_args="${run_args} --64"
         suffix64="64"
         shift
+    elif [ "x$1" = "x--bionic" ]; then
+        # soong linux_bionic builds are 64bit only.
+        run_args="${run_args} --bionic --host --64"
+        suffix64="64"
+        target_mode="no"
+        DEX_LOCATION=$tmp_dir
+        host_lib_root=$OUT_DIR/soong/host/linux_bionic-x86
+        shift
+    elif [ "x$1" = "x--runtime-zipapex" ]; then
+        shift
+        # TODO Should we allow the java.library.path to search the zipapex too?
+        # Not needed at the moment and adding it will be complicated so for now
+        # we'll ignore this.
+        run_args="${run_args} --host --runtime-zipapex $1"
+        target_mode="no"
+        DEX_LOCATION=$tmp_dir
+        # apex_payload.zip is quite large we need a high enough ulimit to
+        # extract it. 512mb should be good enough.
+        file_ulimit=512000
+        shift
     elif [ "x$1" = "x--trace" ]; then
         trace="true"
         shift
@@ -445,7 +482,6 @@
 # The DEX_LOCATION with the chroot prefix, if any.
 chroot_dex_location="$chroot$DEX_LOCATION"
 
-run_args="${run_args} ${image_args}"
 # Allocate file descriptor real_stderr and redirect it to the shell's error
 # output (fd 2).
 if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then
@@ -572,10 +608,6 @@
     fi
 fi
 
-if [ "$have_patchoat" = "no" ]; then
-  run_args="${run_args} --no-patchoat"
-fi
-
 if [ ! "$runtime" = "jvm" ]; then
   run_args="${run_args} --lib $lib"
 fi
@@ -591,12 +623,12 @@
 elif [ "$runtime" = "art" ]; then
     if [ "$target_mode" = "no" ]; then
         guess_host_arch_name
-        run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${multi_image_suffix}.art"
-        run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}:${ANDROID_HOST_OUT}/nativetest${suffix64}"
+        run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}.art"
+        run_args="${run_args} --runtime-option -Djava.library.path=${host_lib_root}/lib${suffix64}:${host_lib_root}/nativetest${suffix64}"
     else
         guess_target_arch_name
         run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}"
-        run_args="${run_args} --boot /data/art-test/core${image_suffix}${multi_image_suffix}.art"
+        run_args="${run_args} --boot /data/art-test/core${image_suffix}.art"
     fi
     if [ "$relocate" = "yes" ]; then
       run_args="${run_args} --relocate"
@@ -631,11 +663,6 @@
     usage="yes"
 fi
 
-if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then
-    err_echo "--bisection-search and --no-patchoat are mutually exclusive"
-    usage="yes"
-fi
-
 # TODO: Chroot-based bisection search is not supported yet (see below); implement it.
 if [ "$bisection_search" = "yes" -a -n "$chroot" ]; then
   err_echo "--chroot with --bisection-search is unsupported"
@@ -704,7 +731,6 @@
              "If used, then the"
         echo "                          other runtime options are ignored."
         echo "    --no-dex2oat          Run as though dex2oat was failing."
-        echo "    --no-patchoat         Run as though patchoat was failing."
         echo "    --prebuild            Run dex2oat on the files before starting test. (default)"
         echo "    --no-prebuild         Do not run dex2oat on the files before starting"
         echo "                          the test."
@@ -725,6 +751,9 @@
         echo "    --output-path [path]  Location where to store the build" \
              "files."
         echo "    --64                  Run the test in 64-bit mode"
+        echo "    --bionic              Use the (host, 64-bit only) linux_bionic libc runtime"
+        echo "    --runtime-zipapex [file]"
+        echo "                          Use the given zipapex file to provide runtime binaries"
         echo "    --trace               Run with method tracing"
         echo "    --strace              Run with syscall tracing from strace."
         echo "    --stream              Run method tracing in streaming mode (requires --trace)"
@@ -738,12 +767,12 @@
         echo "    --never-clean         Keep the test files even if the test succeeds."
         echo "    --chroot [newroot]    Run with root directory set to newroot."
         echo "    --android-root [path] The path on target for the android root. (/system by default)."
+        echo "    --android-runtime-root [path]"
+        echo "                          The path on target for the Android Runtime root."
+        echo "                          (/apex/com.android.runtime by default)."
         echo "    --dex2oat-swap        Use a dex2oat swap file."
         echo "    --instruction-set-features [string]"
         echo "                          Set instruction-set-features for compilation."
-        echo "    --multi-image         Use a set of images compiled with dex2oat multi-image for"
-        echo "                          the boot class path."
-        echo "    --pic-test            Compile the test code position independent."
         echo "    --quiet               Don't print anything except failure messages"
         echo "    --bisection-search    Perform bisection bug search."
         echo "    --vdex                Test using vdex as in input to dex2oat. Only works with --prebuild."
@@ -774,7 +803,7 @@
 echo "${test_dir}: building..." 1>&2
 
 rm -rf "$tmp_dir"
-cp -Rp "$test_dir" "$tmp_dir"
+cp -LRp "$test_dir" "$tmp_dir"
 cd "$tmp_dir"
 
 if [ '!' -r "$build" ]; then
@@ -804,7 +833,7 @@
 # Tests named '<number>-checker-*' will also have their CFGs verified with
 # Checker when compiled with Optimizing on host.
 if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
-  if [ "$runtime" = "art" -a "$image_suffix" = "" ]; then
+  if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$run_optimizing" = "true" ]; then
     # In no-prebuild or no-image mode, the compiler only quickens so disable the checker.
     if [ "$prebuild_mode" = "yes" -a "$have_image" = "yes" ]; then
       run_checker="yes"
@@ -827,15 +856,9 @@
   fi
 fi
 
-  run_args="${run_args} --testlib ${testlib}"
+run_args="${run_args} --testlib ${testlib}"
 
-# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
-# ART output to approximately 128MB. This should be more than sufficient
-# for any test while still catching cases of runaway output.
-# Set a hard limit to encourage ART developers to increase the ulimit here if
-# needed to support a test case rather than resetting the limit in the run
-# script for the particular test in question.
-if ! ulimit -f 128000; then
+if ! ulimit -f ${file_ulimit}; then
   err_echo "ulimit file size setting failed"
 fi
 
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 1f4b829..c2d5e7d 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -89,8 +89,8 @@
   HOST_2ND_ARCH_PREFIX + 'DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
 
 ART_TEST_CHROOT = _env.get('ART_TEST_CHROOT')
-
 ART_TEST_ANDROID_ROOT = _env.get('ART_TEST_ANDROID_ROOT')
+ART_TEST_ANDROID_RUNTIME_ROOT = _env.get('ART_TEST_ANDROID_RUNTIME_ROOT')
 
 ART_TEST_WITH_STRACE = _getEnvBoolean('ART_TEST_DEBUG_GC', False)
 
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 3160079..19f03c3 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -28,6 +28,7 @@
 
 import argparse
 import os
+import pathlib
 import subprocess
 import sys
 
@@ -62,12 +63,25 @@
 print(custom_env)
 os.environ.update(custom_env)
 
+# build is just a binary/script that is directly executed to build any artifacts needed for the
+# test.
+if 'build' in target:
+  build_command = target.get('build').format(
+      ANDROID_BUILD_TOP = env.ANDROID_BUILD_TOP,
+      MAKE_OPTIONS='DX=  -j{threads}'.format(threads = n_threads))
+  sys.stdout.write(str(build_command) + '\n')
+  sys.stdout.flush()
+  if subprocess.call(build_command.split()):
+    sys.exit(1)
+
+# make runs soong/kati to build the target listed in the entry.
 if 'make' in target:
-  build_command = 'make'
+  build_command = 'build/soong/soong_ui.bash --make-mode'
   build_command += ' DX='
   build_command += ' -j' + str(n_threads)
-  build_command += ' -C ' + env.ANDROID_BUILD_TOP
   build_command += ' ' + target.get('make')
+  if env.DIST_DIR:
+    build_command += ' dist'
   sys.stdout.write(str(build_command) + '\n')
   sys.stdout.flush()
   if subprocess.call(build_command.split()):
@@ -95,7 +109,10 @@
   run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
                                    'art/test/testrunner/testrunner.py')]
   test_flags = target.get('run-test', [])
-  run_test_command += test_flags
+  out_dir = pathlib.PurePath(env.SOONG_OUT_DIR)
+  if not out_dir.is_absolute():
+    out_dir = pathlib.PurePath(env.ANDROID_BUILD_TOP).joinpath(out_dir)
+  run_test_command += list(map(lambda a: a.format(SOONG_OUT_DIR=str(out_dir)), test_flags))
   # Let testrunner compute concurrency based on #cpus.
   # b/65822340
   # run_test_command += ['-j', str(n_threads)]
@@ -106,7 +123,8 @@
     run_test_command += ['--host']
     run_test_command += ['--dex2oat-jobs']
     run_test_command += ['4']
-  run_test_command += ['-b']
+  if '--no-build-dependencies' not in test_flags:
+    run_test_command += ['-b']
   run_test_command += ['--verbose']
 
   sys.stdout.write(str(run_test_command) + '\n')
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 84490bf..bc22360 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -1,23 +1,23 @@
 target_config = {
 
-# Configuration syntax:
-#
-#   Required keys: (Use one or more of these)
-#    * golem - specify a golem machine-type to build, e.g. android-armv8
-#              (uses art/tools/golem/build-target.sh)
-#    * make - specify a make target to build, e.g. build-art-host
-#    * run-test - runs the tests in art/test/ directory with testrunner.py,
-#                 specify a list of arguments to pass to testrunner.py
-#
-#   Optional keys: (Use any of these)
-#    * env - Add additional environment variable to the current environment.
-#
-# *** IMPORTANT ***:
-#    This configuration is used by the android build server. Targets must not be renamed
-#    or removed.
-#
+    # Configuration syntax:
+    #
+    #   Required keys: (Use one or more of these)
+    #    * golem - specify a golem machine-type to build, e.g. android-armv8
+    #              (uses art/tools/golem/build-target.sh)
+    #    * make - specify a make target to build, e.g. build-art-host
+    #    * run-test - runs the tests in art/test/ directory with testrunner.py,
+    #                 specify a list of arguments to pass to testrunner.py
+    #
+    #   Optional keys: (Use any of these)
+    #    * env - Add additional environment variable to the current environment.
+    #
+    # *** IMPORTANT ***:
+    #    This configuration is used by the android build server. Targets must not be renamed
+    #    or removed.
+    #
 
-##########################################
+    ##########################################
 
     # General ART configurations.
     # Calls make and testrunner both.
@@ -40,6 +40,12 @@
     'art-interpreter' : {
         'run-test' : ['--interpreter']
     },
+    'art-interpreter-cxx' : {
+        'run-test' : ['--interpreter'],
+        'env' : {
+            'ART_USE_CXX_INTERPRETER' : 'true'
+        }
+    },
     'art-interpreter-access-checks' : {
         'run-test' : ['--interp-ac']
     },
@@ -47,61 +53,45 @@
         'run-test' : ['--jit', '--debuggable', '--ndebuggable']
     },
     'art-jit-on-first-use' : {
-        'run-test' : ['--jit',
-                      '--runtime-option=-Xjitthreshold:0']
+        'run-test' : ['--jit-on-first-use']
     },
     'art-pictest' : {
-        'run-test' : ['--pictest',
-                      '--optimizing']
+        # Deprecated config: All AOT-compiled code is PIC now.
+        'run-test' : ['--optimizing']
     },
     'art-gcstress-gcverify': {
         # Do not exercise '--interpreter', '--optimizing', nor '--jit' in this
-        # configuration, as they are covered by the 'art-interpreter-gcstress',
-        # 'art-optimizing-gcstress' and 'art-jit-gcstress' configurations below.
+        # configuration, as they are covered by the
+        # 'art-interpreter-gcstress-gcverify',
+        # 'art-optimizing-gcstress-gcverify' and 'art-jit-gcstress-gcverify'
+        # configurations below.
         'run-test': ['--interp-ac',
                      '--speed-profile',
                      '--gcstress',
                      '--gcverify']
     },
-    # Rename this configuration as 'art-interpreter-gcstress-gcverify' (b/62611253).
-    'art-interpreter-gcstress' : {
+    'art-interpreter-gcstress-gcverify' : {
         'run-test' : ['--interpreter',
                       '--gcstress',
                       '--gcverify']
     },
-    # Rename this configuration as 'art-optimizing-gcstress-gcverify' (b/62611253).
-    'art-optimizing-gcstress' : {
+    'art-optimizing-gcstress-gcverify' : {
         'run-test' : ['--optimizing',
                       '--gcstress',
                       '--gcverify']
     },
-    # Rename this configuration as 'art-jit-gcstress-gcverify' (b/62611253).
-    'art-jit-gcstress' : {
+    'art-jit-gcstress-gcverify' : {
         'run-test' : ['--jit',
                       '--gcstress',
                       '--gcverify']
     },
     'art-jit-on-first-use-gcstress' : {
-        'run-test' : ['--jit',
-                      '--gcstress',
-                      '--runtime-option=-Xjitthreshold:0']
+        'run-test' : ['--jit-on-first-use',
+                      '--gcstress']
     },
-    # TODO: Rename or repurpose this configuration as
-    # 'art-read-barrier-heap-poisoning' (b/62611253).
-    'art-read-barrier' : {
+    'art-read-barrier-heap-poisoning' : {
         'run-test': ['--interpreter',
-                  '--optimizing'],
-        'env' : {
-            'ART_HEAP_POISONING' : 'true'
-        }
-    },
-    # TODO: Remove or disable this configuration, as it is now covered
-    # by 'art-interpreter-gcstress' and 'art-optimizing-gcstress' --
-    # except for heap poisoning, but that's fine (b/62611253).
-    'art-read-barrier-gcstress' : {
-        'run-test' : ['--interpreter',
-                      '--optimizing',
-                      '--gcstress'],
+                     '--optimizing'],
         'env' : {
             'ART_HEAP_POISONING' : 'true'
         }
@@ -122,6 +112,9 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
+    # TODO: Consider removing this configuration when it is no longer used by
+    # any continuous testing target (b/62611253), as the SS collector overlaps
+    # with the CC collector, since both move objects.
     'art-ss-gc' : {
         'run-test' : ['--interpreter',
                       '--optimizing',
@@ -131,6 +124,7 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
+    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
     'art-gss-gc' : {
         'run-test' : ['--interpreter',
                       '--optimizing',
@@ -140,6 +134,9 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
+    # TODO: Consider removing this configuration when it is no longer used by
+    # any continuous testing target (b/62611253), as the SS collector overlaps
+    # with the CC collector, since both move objects.
     'art-ss-gc-tlab' : {
         'run-test' : ['--interpreter',
                       '--optimizing',
@@ -150,6 +147,7 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
+    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
     'art-gss-gc-tlab' : {
         'run-test' : ['--interpreter',
                       '--optimizing',
@@ -180,12 +178,6 @@
         'run-test' : ['--interpreter',
                       '--no-image']
     },
-    'art-relocate-no-patchoat' : {
-        'run-test' : ['--relocate-npatchoat']
-    },
-    'art-no-dex2oat' : {
-        # Deprecated configuration.
-    },
     'art-heap-poisoning' : {
         'run-test' : ['--interpreter',
                       '--optimizing',
@@ -199,10 +191,9 @@
     },
     'art-preopt' : {
         # This test configuration is intended to be representative of the case
-        # of preopted apps, which are precompiled compiled pic against an
+        # of preopted apps, which are precompiled against an
         # unrelocated image, then used with a relocated image.
-        'run-test' : ['--pictest',
-                      '--prebuild',
+        'run-test' : ['--prebuild',
                       '--relocate',
                       '--jit']
     },
@@ -226,6 +217,9 @@
             'ART_HEAP_POISONING' : 'true'
         }
     },
+    # TODO: Consider removing this configuration when it is no longer used by
+    # any continuous testing target (b/62611253), as the SS collector overlaps
+    # with the CC collector, since both move objects.
     'art-gtest-ss-gc': {
         'make' :  'test-art-host-gtest',
         'env': {
@@ -235,6 +229,7 @@
             'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
         }
     },
+    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
     'art-gtest-gss-gc': {
         'make' :  'test-art-host-gtest',
         'env' : {
@@ -242,6 +237,9 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
+    # TODO: Consider removing this configuration when it is no longer used by
+    # any continuous testing target (b/62611253), as the SS collector overlaps
+    # with the CC collector, since both move objects.
     'art-gtest-ss-gc-tlab': {
         'make' :  'test-art-host-gtest',
         'env': {
@@ -250,6 +248,7 @@
             'ART_USE_READ_BARRIER' : 'false',
         }
     },
+    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
     'art-gtest-gss-gc-tlab': {
         'make' :  'test-art-host-gtest',
         'env': {
@@ -273,10 +272,10 @@
         }
     },
 
-   # ASAN (host) configurations.
+    # ASAN (host) configurations.
 
-   # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
-   # as our build tools leak. b/37751350
+    # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
+    # as our build tools leak. b/37751350
 
     'art-gtest-asan': {
         'make' : 'test-art-host-gtest',
@@ -306,11 +305,11 @@
         }
     },
 
-   # ART Golem build targets used by go/lem (continuous ART benchmarking),
-   # (art-opt-cc is used by default since it mimics the default preopt config),
-   #
-   # calls golem/build-target.sh which builds a golem tarball of the target name,
-   #     e.g. 'golem: android-armv7' produces an 'android-armv7.tar.gz' upon success.
+    # ART Golem build targets used by go/lem (continuous ART benchmarking),
+    # (art-opt-cc is used by default since it mimics the default preopt config),
+    #
+    # calls golem/build-target.sh which builds a golem tarball of the target name,
+    #     e.g. 'golem: android-armv7' produces an 'android-armv7.tar.gz' upon success.
 
     'art-golem-android-armv7': {
         'golem' : 'android-armv7'
@@ -330,4 +329,19 @@
     'art-golem-linux-x64': {
         'golem' : 'linux-x64'
     },
+    'art-linux-bionic-x64': {
+        'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS}',
+        'run-test': ['--run-test-option=--bionic',
+                     '--host',
+                     '--64',
+                     '--no-build-dependencies'],
+    },
+    'art-linux-bionic-x64-zipapex': {
+        'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS} com.android.runtime.host',
+        'run-test': ['--run-test-option=--bionic',
+                     "--run-test-option='--runtime-zipapex {SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.runtime.host.zipapex'",
+                     '--host',
+                     '--64',
+                     '--no-build-dependencies'],
+    },
 }
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 10c8619..0456fdb 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -31,7 +31,7 @@
 There are various options to invoke the script which are:
 -t: Either the test name as in art/test or the test name including the variant
     information. Eg, "-t 001-HelloWorld",
-    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32"
+    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
 -j: Number of thread workers to be used. Eg - "-j64"
 --dry-run: Instead of running the test name, just print its name.
 --verbose
@@ -117,6 +117,7 @@
 gdb = False
 gdb_arg = ''
 runtime_option = ''
+with_agent = []
 run_test_option = []
 stop_testrunner = False
 dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
@@ -138,22 +139,21 @@
   global TOTAL_VARIANTS_SET
   global DISABLED_TEST_CONTAINER
   # TODO: Avoid duplication of the variant names in different lists.
-  VARIANT_TYPE_DICT['pictest'] = {'pictest', 'npictest'}
   VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
   VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
   VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
-  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'multipicimage'}
+  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
   VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
   VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
   VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
   VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
-  VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
+  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
   VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
   VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
   VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                 'field-stress', 'step-stress'}
-  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
-                                   'regalloc_gc', 'speed-profile'}
+  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
+                                   'optimizing', 'regalloc_gc', 'speed-profile', 'baseline'}
 
   for v_type in VARIANT_TYPE_DICT:
     TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
@@ -180,7 +180,6 @@
   # These are the default variant-options we will use if nothing in the group is specified.
   default_variants = {
       'target': {'host', 'target'},
-      'pictest': {'npictest'},
       'prebuild': {'prebuild'},
       'cdex_level': {'cdex-fast'},
       'jvmti': { 'no-jvmti'},
@@ -194,7 +193,6 @@
       'gc': {'cms'},
       'jni': {'checkjni'},
       'image': {'picimage'},
-      'pictest': {'pictest'},
       'debuggable': {'ndebuggable'},
       'run': {'debug'},
       # address_sizes_target depends on the target so it is dealt with below.
@@ -333,6 +331,9 @@
   if runtime_option:
     for opt in runtime_option:
       options_all += ' --runtime-option ' + opt
+  if with_agent:
+    for opt in with_agent:
+      options_all += ' --with-agent ' + opt
 
   if dex2oat_jobs != -1:
     options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)
@@ -342,7 +343,7 @@
                                  user_input_variants['prebuild'], user_input_variants['compiler'],
                                  user_input_variants['relocate'], user_input_variants['trace'],
                                  user_input_variants['gc'], user_input_variants['jni'],
-                                 user_input_variants['image'], user_input_variants['pictest'],
+                                 user_input_variants['image'],
                                  user_input_variants['debuggable'], user_input_variants['jvmti'],
                                  user_input_variants['cdex_level'])
     return config
@@ -355,13 +356,13 @@
       'prebuild': [''], 'compiler': [''],
       'relocate': [''], 'trace': [''],
       'gc': [''], 'jni': [''],
-      'image': [''], 'pictest': [''],
+      'image': [''],
       'debuggable': [''], 'jvmti': [''],
       'cdex_level': ['']})
 
   def start_combination(config_tuple, address_size):
       test, target, run, prebuild, compiler, relocate, trace, gc, \
-      jni, image, pictest, debuggable, jvmti, cdex_level = config_tuple
+      jni, image, debuggable, jvmti, cdex_level = config_tuple
 
       if stop_testrunner:
         # When ART_TEST_KEEP_GOING is set to false, then as soon as a test
@@ -383,7 +384,6 @@
       test_name += gc + '-'
       test_name += jni + '-'
       test_name += image + '-'
-      test_name += pictest + '-'
       test_name += debuggable + '-'
       test_name += jvmti + '-'
       test_name += cdex_level + '-'
@@ -391,7 +391,7 @@
       test_name += address_size
 
       variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
-                     image, pictest, debuggable, jvmti, cdex_level, address_size}
+                     image, debuggable, jvmti, cdex_level, address_size}
 
       options_test = options_all
 
@@ -400,12 +400,15 @@
       elif target == 'jvm':
         options_test += ' --jvm'
 
-      # Honor ART_TEST_CHROOT and ART_TEST_ANDROID_ROOT, but only for target tests.
+      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT and ART_TEST_ANDROID_RUNTIME_ROOT,
+      # but only for target tests.
       if target == 'target':
         if env.ART_TEST_CHROOT:
           options_test += ' --chroot ' + env.ART_TEST_CHROOT
         if env.ART_TEST_ANDROID_ROOT:
           options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
+        if env.ART_TEST_ANDROID_RUNTIME_ROOT:
+          options_test += ' --android-runtime-root ' + env.ART_TEST_ANDROID_RUNTIME_ROOT
 
       if run == 'ndebug':
         options_test += ' -O'
@@ -429,15 +432,17 @@
         options_test += ' --interpreter --verify-soft-fail'
       elif compiler == 'jit':
         options_test += ' --jit'
+      elif compiler == 'jit-on-first-use':
+        options_test += ' --jit --runtime-option -Xjitthreshold:0'
       elif compiler == 'speed-profile':
         options_test += ' --random-profile'
+      elif compiler == 'baseline':
+        options_test += ' --baseline'
 
       if relocate == 'relocate':
         options_test += ' --relocate'
       elif relocate == 'no-relocate':
         options_test += ' --no-relocate'
-      elif relocate == 'relocate-npatchoat':
-        options_test += ' --relocate --no-patchoat'
 
       if trace == 'trace':
         options_test += ' --trace'
@@ -456,11 +461,6 @@
 
       if image == 'no-image':
         options_test += ' --no-image'
-      elif image == 'multipicimage':
-        options_test += ' --multi-image'
-
-      if pictest == 'pictest':
-        options_test += ' --pic-test'
 
       if debuggable == 'debuggable':
         options_test += ' --debuggable'
@@ -813,7 +813,7 @@
   It supports two types of test_name:
   1) Like 001-HelloWorld. In this case, it will just verify if the test actually
   exists and if it does, it returns the testname.
-  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32
+  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32
   In this case, it will parse all the variants and check if they are placed
   correctly. If yes, it will set the various VARIANT_TYPES to use the
   variants required to run the test. Again, it returns the test_name
@@ -837,7 +837,6 @@
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
-  regex += '(' + '|'.join(VARIANT_TYPE_DICT['pictest']) + ')-'
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
   regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
@@ -854,12 +853,11 @@
     _user_input_variants['gc'].add(match.group(7))
     _user_input_variants['jni'].add(match.group(8))
     _user_input_variants['image'].add(match.group(9))
-    _user_input_variants['pictest'].add(match.group(10))
-    _user_input_variants['debuggable'].add(match.group(11))
-    _user_input_variants['jvmti'].add(match.group(12))
-    _user_input_variants['cdex_level'].add(match.group(13))
-    _user_input_variants['address_sizes'].add(match.group(15))
-    return {match.group(14)}
+    _user_input_variants['debuggable'].add(match.group(10))
+    _user_input_variants['jvmti'].add(match.group(11))
+    _user_input_variants['cdex_level'].add(match.group(12))
+    _user_input_variants['address_sizes'].add(match.group(14))
+    return {match.group(13)}
   raise ValueError(test_name + " is not a valid test")
 
 
@@ -911,6 +909,7 @@
   global timeout
   global dex2oat_jobs
   global run_all_configs
+  global with_agent
 
   parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
   parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
@@ -943,6 +942,8 @@
                             This should be enclosed in single-quotes to allow for spaces. The option
                             will be split using shlex.split() prior to invoking run-test.
                             Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
+  global_group.add_argument('--with-agent', action='append', dest='with_agent',
+                            help="""Pass an agent to be attached to the runtime""")
   global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                             help="""Pass an option to the runtime. Runtime options
                             starting with a '-' must be separated by a '=', for
@@ -991,6 +992,7 @@
     if options['gdb_arg']:
       gdb_arg = options['gdb_arg']
   runtime_option = options['runtime_option'];
+  with_agent = options['with_agent'];
   run_test_option = sum(map(shlex.split, options['run_test_option']), [])
 
   timeout = options['timeout']
@@ -1013,10 +1015,8 @@
       build_targets += 'test-art-target-run-test-dependencies '
     if 'jvm' in _user_input_variants['target']:
       build_targets += 'test-art-host-run-test-dependencies '
-    build_command = 'make'
+    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
     build_command += ' DX='
-    build_command += ' -j'
-    build_command += ' -C ' + env.ANDROID_BUILD_TOP
     build_command += ' ' + build_targets
     if subprocess.call(build_command.split()):
       # Debugging for b/62653020
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index f5e1e1b..3be48a3 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -24,6 +24,7 @@
 #include "jvmti.h"
 
 #include "jvmti_helper.h"
+#include "ti_macros.h"
 
 namespace art {
 
@@ -87,7 +88,7 @@
       break;
     default:
       LOG(FATAL) << "Unable to figure out type!";
-      return nullptr;
+      UNREACHABLE();
   }
   std::ostringstream oss;
   oss << "(" << type << ")L" << name << ";";
diff --git a/test/ti-agent/jni_binder.cc b/test/ti-agent/jni_binder.cc
index 32236de..a115c22 100644
--- a/test/ti-agent/jni_binder.cc
+++ b/test/ti-agent/jni_binder.cc
@@ -174,7 +174,7 @@
                                                          class_loader));
 }
 
-jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
+jclass GetClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
   if (class_loader != nullptr) {
     return FindClassWithClassLoader(env, class_name, class_loader);
   }
@@ -223,7 +223,7 @@
   }
 
   // TODO: Implement scanning *all* classloaders.
-  LOG(FATAL) << "Unimplemented";
+  LOG(WARNING) << "Scanning all classloaders unimplemented";
 
   return nullptr;
 }
@@ -251,7 +251,7 @@
 
 void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
   // Use JNI to load the class.
-  ScopedLocalRef<jclass> klass(env, FindClass(jvmti_env, env, class_name, class_loader));
+  ScopedLocalRef<jclass> klass(env, GetClass(jvmti_env, env, class_name, class_loader));
   CHECK(klass.get() != nullptr) << class_name;
   BindFunctionsOnClass(jvmti_env, env, klass.get());
 }
diff --git a/test/ti-agent/jni_binder.h b/test/ti-agent/jni_binder.h
index e998dc5..3d2ff9c 100644
--- a/test/ti-agent/jni_binder.h
+++ b/test/ti-agent/jni_binder.h
@@ -24,7 +24,7 @@
 
 // Find the given classname. First try the implied classloader, then the system classloader,
 // then use JVMTI to find all classloaders.
-jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader);
+jclass GetClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader);
 
 // Load the class through JNI. Inspect it, find all native methods. Construct the corresponding
 // mangled name, run dlsym and bind the method.
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index bd320c6..cd7af10 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -92,7 +92,7 @@
 
   struct Allocator : public dex::Writer::Allocator {
     explicit Allocator(jvmtiEnv* jvmti_env) : jvmti_env_(jvmti_env) {}
-    virtual void* Allocate(size_t size) {
+    void* Allocate(size_t size) override {
       unsigned char* out = nullptr;
       if (JVMTI_ERROR_NONE != jvmti_env_->Allocate(size, &out)) {
         return nullptr;
@@ -100,7 +100,7 @@
         return out;
       }
     }
-    virtual void Free(void* ptr) {
+    void Free(void* ptr) override {
       jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
     }
    private:
@@ -157,14 +157,12 @@
       : jvmtienv_(jvmtienv),
         class_(c),
         name_(nullptr),
-        generic_(nullptr),
         file_(nullptr),
         debug_ext_(nullptr) {}
 
   ~ScopedClassInfo() {
     if (class_ != nullptr) {
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
-      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
     }
@@ -173,12 +171,11 @@
   bool Init() {
     if (class_ == nullptr) {
       name_ = const_cast<char*>("<NONE>");
-      generic_ = const_cast<char*>("<NONE>");
       return true;
     } else {
       jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
       jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
-      return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+      return jvmtienv_->GetClassSignature(class_, &name_, nullptr) == JVMTI_ERROR_NONE &&
           ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
           ret1 != JVMTI_ERROR_INVALID_CLASS &&
           ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
@@ -192,9 +189,6 @@
   const char* GetName() const {
     return name_;
   }
-  const char* GetGeneric() const {
-    return generic_;
-  }
   const char* GetSourceDebugExtension() const {
     if (debug_ext_ == nullptr) {
       return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
@@ -214,7 +208,6 @@
   jvmtiEnv* jvmtienv_;
   jclass class_;
   char* name_;
-  char* generic_;
   char* file_;
   char* debug_ext_;
 };
@@ -229,14 +222,12 @@
         class_info_(nullptr),
         name_(nullptr),
         signature_(nullptr),
-        generic_(nullptr),
         first_line_(-1) {}
 
   ~ScopedMethodInfo() {
     DeleteLocalRef(env_, declaring_class_);
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
-    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
   }
 
   bool Init() {
@@ -257,7 +248,7 @@
       return false;
     }
     return class_info_->Init() &&
-        (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+        (jvmtienv_->GetMethodName(method_, &name_, &signature_, nullptr) == JVMTI_ERROR_NONE);
   }
 
   const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -276,10 +267,6 @@
     return signature_;
   }
 
-  const char* GetGeneric() const {
-    return generic_;
-  }
-
   jint GetFirstLine() const {
     return first_line_;
   }
@@ -292,7 +279,6 @@
   std::unique_ptr<ScopedClassInfo> class_info_;
   char* name_;
   char* signature_;
-  char* generic_;
   jint first_line_;
 
   friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
@@ -306,20 +292,18 @@
         field_(field),
         class_info_(nullptr),
         name_(nullptr),
-        type_(nullptr),
-        generic_(nullptr) {}
+        type_(nullptr) {}
 
   ~ScopedFieldInfo() {
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_));
-    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
   }
 
   bool Init() {
     class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
     return class_info_->Init() &&
         (jvmtienv_->GetFieldName(
-            declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE);
+            declaring_class_, field_, &name_, &type_, nullptr) == JVMTI_ERROR_NONE);
   }
 
   const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -338,10 +322,6 @@
     return type_;
   }
 
-  const char* GetGeneric() const {
-    return generic_;
-  }
-
  private:
   jvmtiEnv* jvmtienv_;
   jclass declaring_class_;
@@ -349,7 +329,6 @@
   std::unique_ptr<ScopedClassInfo> class_info_;
   char* name_;
   char* type_;
-  char* generic_;
 
   friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m);
 };
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 32e5b55..e27878f 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -104,9 +104,18 @@
 $(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
 	cp $(PRIVATE_AHAT_SOURCE_PROGUARD_MAP) $@
 
+ifeq (true,$(HOST_PREFER_32_BIT))
+  AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm32
+  AHAT_TEST_DALVIKVM_ARG := --32
+else
+  AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm64
+  AHAT_TEST_DALVIKVM_ARG := --64
+endif
+
 # Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
 AHAT_TEST_DUMP_DEPENDENCIES := \
-  $(HOST_OUT_EXECUTABLES)/dalvikvm64 \
+  $(AHAT_TEST_DALVIKVM_DEP) \
+  $(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \
   $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) \
   $(HOST_OUT_EXECUTABLES)/art \
   $(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
@@ -114,20 +123,24 @@
 $(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
 $(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
 $(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
 $(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
 	rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
 	mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
 	ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
-	  $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+	  $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+	  -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
 
 $(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
 $(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
 $(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA)
+$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
 $(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
 	rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
 	mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
 	ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
-	  $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+	  $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+	  -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
 
 # --- ahat-ri-test-dump.jar -------
 include $(CLEAR_VARS)
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index 7aa994a..01e00e9 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -73,6 +73,9 @@
     method public com.android.ahat.heapdump.AhatInstance getAssociatedBitmapInstance();
     method public com.android.ahat.heapdump.AhatClassObj getAssociatedClassForOverhead();
     method public com.android.ahat.heapdump.AhatInstance getBaseline();
+    method public java.lang.String getBinderProxyInterfaceName();
+    method public java.lang.String getBinderStubInterfaceName();
+    method public java.lang.String getBinderTokenDescriptor();
     method public java.lang.String getClassName();
     method public com.android.ahat.heapdump.AhatClassObj getClassObj();
     method public java.lang.String getDexCacheLocation(int);
diff --git a/tools/ahat/src/main/com/android/ahat/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java
index ab88c04..df3b577 100644
--- a/tools/ahat/src/main/com/android/ahat/Summarizer.java
+++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java
@@ -112,6 +112,24 @@
       formatted.append(" overhead for ");
       formatted.append(summarize(cls));
     }
+
+    // Annotate BinderProxy with its interface name.
+    String binderProxyInterface = inst.getBinderProxyInterfaceName();
+    if (binderProxyInterface != null) {
+      formatted.appendFormat(" for %s", binderProxyInterface);
+    }
+
+    // Annotate Binder tokens with their descriptor.
+    String binderTokenDescriptor = inst.getBinderTokenDescriptor();
+    if (binderTokenDescriptor != null) {
+      formatted.appendFormat(" binder token (%s)", binderTokenDescriptor);
+    }
+    // Annotate Binder services with their interface name.
+    String binderStubInterface = inst.getBinderStubInterfaceName();
+    if (binderStubInterface != null) {
+      formatted.appendFormat(" binder service (%s)", binderStubInterface);
+    }
+
     return formatted;
   }
 
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
index 0511798..d2ba68d 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
@@ -145,6 +145,52 @@
     return null;
   }
 
+  @Override public String getBinderProxyInterfaceName() {
+    if (isInstanceOfClass("android.os.BinderProxy")) {
+      for (AhatInstance inst : getReverseReferences()) {
+        String className = inst.getClassName();
+        if (className.endsWith("$Stub$Proxy")) {
+          Value value = inst.getField("mRemote");
+          if (value != null && value.asAhatInstance() == this) {
+            return className.substring(0, className.lastIndexOf("$Stub$Proxy"));
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override public String getBinderTokenDescriptor() {
+    String descriptor = getBinderDescriptor();
+    if (descriptor == null) {
+      return null;
+    }
+
+    if (isInstanceOfClass(descriptor + "$Stub")) {
+      // This is an instance of an auto-generated interface class, and
+      // therefore not a binder token.
+      return null;
+    }
+
+    return descriptor;
+  }
+
+  @Override public String getBinderStubInterfaceName() {
+    String descriptor = getBinderDescriptor();
+    if (descriptor == null || descriptor.isEmpty()) {
+      // Binder interface stubs always have a non-empty descriptor
+      return null;
+    }
+
+    // We only consider something a binder service if it's an instance of the
+    // auto-generated descriptor$Stub class.
+    if (isInstanceOfClass(descriptor + "$Stub")) {
+      return descriptor;
+    }
+
+    return null;
+  }
+
   @Override public AhatInstance getAssociatedBitmapInstance() {
     return getBitmapInfo() == null ? null : this;
   }
@@ -162,6 +208,25 @@
   }
 
   /**
+   * Returns the descriptor of an android.os.Binder object.
+   * If no descriptor is set, returns an empty string.
+   * If the object is not an android.os.Binder object, returns null.
+   */
+  private String getBinderDescriptor() {
+    if (isInstanceOfClass("android.os.Binder")) {
+      Value value = getField("mDescriptor");
+
+      if (value == null) {
+        return "";
+      } else {
+        return value.asAhatInstance().asString();
+      }
+    } else {
+      return null;
+    }
+  }
+
+  /**
    * Read the given field from the given instance.
    * The field is assumed to be a byte[] field.
    * Returns null if the field value is null, not a byte[] or could not be read.
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index c85a057..281c977 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -490,6 +490,44 @@
   }
 
   /**
+   * Returns the name of the Binder proxy interface associated with this object.
+   * Only applies to instances of android.os.BinderProxy. If this is an
+   * instance of BinderProxy, returns the fully qualified binder interface name,
+   * otherwise returns null.
+   *
+   * @return the name of the binder interface associated with this object
+   */
+  public String getBinderProxyInterfaceName() {
+    return null;
+  }
+
+  /**
+   * Returns the descriptor of the Binder token associated with this object.
+   * Only applies to instances of android.os.Binder. If this object is an
+   * instance of an auto-generated "descriptor$Stub" subclass, it is a binder
+   * stub rather than a token, and this function will return null.
+   * In that case, see {@link AhatInstance#getBinderStubInterfaceName}.
+   *
+   * @return the descriptor of this object, if it's a binder token
+   */
+  public String getBinderTokenDescriptor() {
+    return null;
+  }
+
+  /**
+   * Returns the name of the Binder stub interface associated with this object.
+   * Only applies to instances which are a subclass of android.os.Binder,
+   * and are an instance of class 'descriptor$Stub', where descriptor
+   * is the descriptor of the android.os.Binder object.
+   *
+   * @return the name of the binder interface associated with this object,
+   *         or null if this is not a binder stub interface.
+   */
+  public String getBinderStubInterfaceName() {
+    return null;
+  }
+
+  /**
    * Returns the android.graphics.Bitmap instance associated with this object.
    * Instances of android.graphics.Bitmap return themselves. If this is a
    * byte[] array containing pixel data for an instance of
diff --git a/tools/ahat/src/test-dump/DumpedStuff.java b/tools/ahat/src/test-dump/DumpedStuff.java
index 804a3a3..de2968f 100644
--- a/tools/ahat/src/test-dump/DumpedStuff.java
+++ b/tools/ahat/src/test-dump/DumpedStuff.java
@@ -124,6 +124,47 @@
     }
   }
 
+  public interface IDumpedManager {
+    public static class Stub extends android.os.Binder implements IDumpedManager {
+      private static final java.lang.String DESCRIPTOR = "DumpedStuff$IDumpedManager";
+      public Stub() {
+        super(DESCRIPTOR);
+      }
+      public static class Proxy implements IDumpedManager {
+        android.os.IBinder mRemote;
+        Proxy(android.os.IBinder binderProxy) {
+          mRemote = binderProxy;
+        }
+      }
+    }
+  }
+
+  public interface IBinderInterfaceImpostor {
+    public static class Stub {
+      public static class Proxy implements IBinderInterfaceImpostor {
+        android.os.IBinder mFakeRemote = new android.os.BinderProxy();
+        Proxy(android.os.IBinder binderProxy) {
+          mFakeRemote = binderProxy;
+        }
+      }
+    }
+  }
+
+  private static class BinderProxyCarrier {
+    android.os.IBinder mRemote;
+    BinderProxyCarrier(android.os.IBinder binderProxy) {
+      mRemote = binderProxy;
+    }
+  }
+
+  private static class BinderService extends IDumpedManager.Stub {
+    // Intentionally empty
+  };
+
+  private static class FakeBinderService extends IBinderInterfaceImpostor.Stub {
+    // Intentionally empty
+  };
+
   public String basicString = "hello, world";
   public String nonAscii = "Sigma (Æ©) is not ASCII";
   public String embeddedZero = "embedded\0...";  // Non-ASCII for string compression purposes.
@@ -158,6 +199,17 @@
   public int[] modifiedArray;
   public Object objectAllocatedAtKnownSite;
   public Object objectAllocatedAtKnownSubSite;
+  public android.os.IBinder correctBinderProxy = new android.os.BinderProxy();
+  public android.os.IBinder imposedBinderProxy = new android.os.BinderProxy();
+  public android.os.IBinder carriedBinderProxy = new android.os.BinderProxy();
+  Object correctBinderProxyObject = new IDumpedManager.Stub.Proxy(correctBinderProxy);
+  Object impostorBinderProxyObject = new IBinderInterfaceImpostor.Stub.Proxy(imposedBinderProxy);
+  Object carrierBinderProxyObject = new BinderProxyCarrier(carriedBinderProxy);
+
+  Object binderService = new BinderService();
+  Object fakeBinderService = new FakeBinderService();
+  Object binderToken = new android.os.Binder();
+  Object namedBinderToken = new android.os.Binder("awesomeToken");
 
   // Allocate those objects that we need to not be GC'd before taking the heap
   // dump.
diff --git a/tools/ahat/src/test-dump/android/os/Binder.java b/tools/ahat/src/test-dump/android/os/Binder.java
new file mode 100644
index 0000000..e89bb74
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/Binder.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import java.lang.String;
+
+/** Fake android.os.Binder class that just holds a descriptor.
+ *
+ * Note that having this class will cause Proguard to issue warnings when
+ * building ahat-test-dump with 'mm' or 'mma':
+ *
+ * Warning: Library class android.net.wifi.rtt.IWifiRttManager$Stub extends
+ * program class android.os.Binder
+ *
+ * This is because when building for the device, proguard will include the
+ * framework jars, which contain Stub classes that extend android.os.Binder,
+ * which is defined again here.
+ *
+ * Since we don't actually run this code on the device, these warnings can
+ * be ignored.
+ */
+public class Binder implements IBinder {
+  public Binder() {
+    mDescriptor = null;
+  }
+
+  public Binder(String descriptor) {
+    mDescriptor = descriptor;
+  }
+
+  private String mDescriptor;
+}
diff --git a/tools/ahat/src/test-dump/android/os/BinderProxy.java b/tools/ahat/src/test-dump/android/os/BinderProxy.java
new file mode 100644
index 0000000..5f35c61
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/BinderProxy.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/** Fake android.os.BinderProxy class that does absolutely nothing. */
+public class BinderProxy implements IBinder {}
diff --git a/tools/ahat/src/test-dump/android/os/IBinder.java b/tools/ahat/src/test-dump/android/os/IBinder.java
new file mode 100644
index 0000000..6f01468
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/IBinder.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/** Fake android.os.IBinder that means nothing. */
+public interface IBinder {}
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index 196eb1e..af0a73b 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -549,4 +549,60 @@
     // Other kinds of objects should not have associated classes for overhead.
     assertNull(cls.getAssociatedClassForOverhead());
   }
+
+  @Test
+  public void binderProxy() throws IOException {
+    TestDump dump = TestDump.getTestDump();
+
+    AhatInstance correctObj = dump.getDumpedAhatInstance("correctBinderProxy");
+    assertEquals("DumpedStuff$IDumpedManager", correctObj.getBinderProxyInterfaceName());
+
+    AhatInstance imposedObj = dump.getDumpedAhatInstance("imposedBinderProxy");
+    assertNull(imposedObj.getBinderProxyInterfaceName());
+
+    AhatInstance carriedObj = dump.getDumpedAhatInstance("carriedBinderProxy");
+    assertNull(carriedObj.getBinderProxyInterfaceName());
+  }
+
+  @Test
+  public void binderToken() throws IOException {
+    TestDump dump = TestDump.getTestDump();
+
+    // Tokens without a descriptor return an empty string
+    AhatInstance binderToken = dump.getDumpedAhatInstance("binderToken");
+    assertEquals("", binderToken.getBinderTokenDescriptor());
+
+    // Named binder tokens return their descriptor
+    AhatInstance namedBinderToken = dump.getDumpedAhatInstance("namedBinderToken");
+    assertEquals("awesomeToken", namedBinderToken.getBinderTokenDescriptor());
+
+    // Binder stubs aren't considered binder tokens
+    AhatInstance binderService = dump.getDumpedAhatInstance("binderService");
+    assertEquals(null, binderService.getBinderTokenDescriptor());
+  }
+
+  @Test
+  public void binderStub() throws IOException {
+    TestDump dump = TestDump.getTestDump();
+
+    // Regular binder service returns the interface name and no token descriptor
+    AhatInstance binderService = dump.getDumpedAhatInstance("binderService");
+    assertEquals("DumpedStuff$IDumpedManager", binderService.getBinderStubInterfaceName());
+
+    // Binder tokens aren't considered binder services
+    AhatInstance binderToken = dump.getDumpedAhatInstance("binderToken");
+    assertEquals(null, binderToken.getBinderStubInterfaceName());
+
+    // Named binder tokens aren't considered binder services
+    AhatInstance namedBinderToken = dump.getDumpedAhatInstance("namedBinderToken");
+    assertEquals(null, namedBinderToken.getBinderStubInterfaceName());
+
+    // Fake service returns null
+    AhatInstance fakeService = dump.getDumpedAhatInstance("fakeBinderService");
+    assertNull(fakeService.getBinderStubInterfaceName());
+
+    // Random non-binder object returns null
+    AhatInstance nonBinderObject = dump.getDumpedAhatInstance("anObject");
+    assertNull(nonBinderObject.getBinderStubInterfaceName());
+  }
 }
diff --git a/tools/amm/Android.bp b/tools/amm/Android.bp
new file mode 100644
index 0000000..e6f6ff7
--- /dev/null
+++ b/tools/amm/Android.bp
@@ -0,0 +1,25 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// --- ammtestjni.so -------------
+
+cc_library_shared {
+    name: "libammtestjni",
+
+    srcs: [
+        "AmmTest/jni/ammtest.c",
+    ],
+
+    sdk_version: "current",
+}
diff --git a/tools/amm/Android.mk b/tools/amm/Android.mk
index 47030c5..fa4ca44 100644
--- a/tools/amm/Android.mk
+++ b/tools/amm/Android.mk
@@ -14,13 +14,6 @@
 
 LOCAL_PATH := $(call my-dir)
 
-# --- ammtestjni.so -------------
-include $(CLEAR_VARS)
-LOCAL_MODULE := libammtestjni
-LOCAL_SRC_FILES := $(call all-c-files-under, AmmTest/jni)
-LOCAL_SDK_VERSION := current
-include $(BUILD_SHARED_LIBRARY)
-
 # --- AmmTest.apk --------------
 include $(CLEAR_VARS)
 LOCAL_PACKAGE_NAME := AmmTest
@@ -31,4 +24,3 @@
 LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/AmmTest/aahat.png
 LOCAL_MANIFEST_FILE := AmmTest/AndroidManifest.xml
 include $(BUILD_PACKAGE)
-
diff --git a/tools/art b/tools/art
index 9c032c0..58a8150 100644
--- a/tools/art
+++ b/tools/art
@@ -49,6 +49,7 @@
   --profile                Run with profiling, then run using profile data.
   --verbose                Run script verbosely.
   --no-clean               Don't cleanup oat directories.
+  --no-compile             Don't invoke dex2oat before running.
   --allow-default-jdwp     Don't automatically put in -XjdwpProvider:none.
                            You probably do not want this.
 
@@ -198,6 +199,7 @@
     # (see run_art function)
     verbose_run ANDROID_DATA=$ANDROID_DATA                    \
           ANDROID_ROOT=$ANDROID_ROOT                          \
+          ANDROID_RUNTIME_ROOT=$ANDROID_RUNTIME_ROOT          \
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH                    \
           PATH=$ANDROID_ROOT/bin:$PATH                        \
           LD_USE_LOAD_BIAS=1                                  \
@@ -221,6 +223,8 @@
 # -Xcompiler-options arguments are stored in DEX2OAT_FLAGS array
 # -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
 # -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
+# -Xbootclasspath argument is stored in DEX2OAT_BCP
+# -Xbootclasspath-locations argument is stored in DEX2OAT_BCP_LOCS
 function extract_dex2oat_flags() {
   while [ $# -gt 0 ]; do
     case $1 in
@@ -233,6 +237,16 @@
         # Remove '-Ximage:' from the argument.
         DEX2OAT_BOOT_IMAGE=${DEX2OAT_BOOT_IMAGE##-Ximage:}
         ;;
+      -Xbootclasspath:*)
+        DEX2OAT_BCP=$1
+        # Remove '-Xbootclasspath:' from the argument.
+        DEX2OAT_BCP=${DEX2OAT_BCP##-Xbootclasspath:}
+        ;;
+      -Xbootclasspath-locations:*)
+        DEX2OAT_BCP_LOCS=$1
+        # Remove '-Xbootclasspath-locations:' from the argument.
+        DEX2OAT_BCP_LOCS=${DEX2OAT_BCP_LOCS##-Xbootclasspath-locations:}
+        ;;
       -cp)
         # Reset any previously parsed classpath, just like dalvikvm
         # only supports one -cp argument.
@@ -260,6 +274,7 @@
   # Run dalvikvm.
   verbose_run ANDROID_DATA="$ANDROID_DATA"                  \
               ANDROID_ROOT="$ANDROID_ROOT"                  \
+              ANDROID_RUNTIME_ROOT="$ANDROID_RUNTIME_ROOT"  \
               LD_LIBRARY_PATH="$LD_LIBRARY_PATH"            \
               PATH="$ANDROID_ROOT/bin:$PATH"                \
               LD_USE_LOAD_BIAS=1                            \
@@ -290,6 +305,7 @@
 ALLOW_DEFAULT_JDWP="no"
 VERBOSE="no"
 CLEAN_OAT_FILES="yes"
+RUN_DEX2OAT="yes"
 EXTRA_OPTIONS=()
 DEX2OAT_FLAGS=()
 DEX2OAT_CLASSPATH=()
@@ -347,6 +363,10 @@
   --no-clean)
     CLEAN_OAT_FILES="no"
     ;;
+  --no-compile)
+    CLEAN_OAT_FILES="no"
+    RUN_DEX2OAT="no"
+    ;;
   --allow-default-jdwp)
     ALLOW_DEFAULT_JDWP="yes"
     ;;
@@ -379,7 +399,42 @@
 done
 
 PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
-ANDROID_ROOT=$PROG_DIR/..
+ANDROID_ROOT="$(cd $PROG_DIR/..; pwd -P)"
+
+# If ANDROID_RUNTIME_ROOT is not set, try to detect whether we are running on
+# target or host and set that environment variable to the usual default value.
+if [ -z "$ANDROID_RUNTIME_ROOT" ]; then
+  # This script is used on host and target (device). However, the (expected)
+  # default value `ANDROID_RUNTIME_ROOT` is not the same on host and target:
+  # - on host, `ANDROID_RUNTIME_ROOT` is expected to be "$ANDROID_ROOT/com.android.apex";
+  # - on target, `ANDROID_RUNTIME_ROOT` is expected to be "$ANDROID_ROOT/../apex/com.android.apex".
+  #
+  # We use the presence/absence of the `$ANDROID_ROOT/../apex` directory to
+  # determine whether we are on target or host (this is brittle, but simple).
+  if [ -d "$ANDROID_ROOT/../apex" ]; then
+    # Target case.
+    #
+    # We should be setting `ANDROID_RUNTIME_ROOT` to
+    # "$ANDROID_ROOT/../apex/com.android.runtime" here. However, the Runtime APEX
+    # is not (yet) supported by the ART Buildbot setup (see b/121117762); and yet
+    # ICU code depends on `ANDROID_RUNTIME_ROOT` to find ICU .dat files.
+    #
+    # As a temporary workaround, we:
+    # - make the ART Buildbot build script (art/tools/buildbot-build.sh) also
+    #   generate the ICU .dat files in `/system/etc/icu` on device (these files
+    #   are normally only put in the Runtime APEX on device);
+    # - set `ANDROID_RUNTIME_ROOT` to `$ANDROID_ROOT` (i.e. "/system") here.
+    #
+    # TODO(b/121117762): Set `ANDROID_RUNTIME_ROOT` to
+    # "$ANDROID_ROOT/../apex/com.android.runtime" when the Runtime APEX is fully
+    # supported on the ART Buildbot and Golem.
+    ANDROID_RUNTIME_ROOT=$ANDROID_ROOT
+  else
+    # Host case.
+    ANDROID_RUNTIME_ROOT="$ANDROID_ROOT/com.android.runtime"
+  fi
+fi
+
 ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
 
 if [ ! -x "$ART_BINARY_PATH" ]; then
@@ -414,7 +469,7 @@
 # Extract the dex2oat flags from the list of arguments.
 # -Xcompiler-options arguments are stored in DEX2OAT_FLAGS array
 # -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
-# -Ximage argument is stored in DEX2OAT_BOOTIMAGE
+# -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
 extract_dex2oat_flags "$@"
 
 # If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
@@ -431,6 +486,55 @@
   DELETE_ANDROID_DATA="yes"
 fi
 
+if [[ "$DEX2OAT_BCP" = "" && "$DEX2OAT_BCP_LOCS" != "" ]]; then
+  echo "Cannot use -Xbootclasspath-locations without -Xbootclasspath"
+  exit 1
+fi
+
+if [[ "$DEX2OAT_BOOT_IMAGE" = *core*.art && "$DEX2OAT_BCP" = "" ]]; then
+  # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+  # because that's what we use for compiling the core.art image.
+  # It may contain additional modules from TEST_CORE_JARS.
+  core_jars_list="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+  core_jars_suffix=
+  if [[ -e $ANDROID_ROOT/framework/core-oj-hostdex.jar ]]; then
+    core_jars_suffix=-hostdex
+    core_locations_dir=$ANDROID_ROOT/framework
+    prefix=$PWD/
+    if [[ ${core_locations_dir:0:${#prefix}} = $prefix ]]; then
+      core_locations_dir="${core_locations_dir##$prefix}"
+    fi
+  elif [[ -e $ANDROID_ROOT/framework/core-oj-testdex.jar ]]; then
+    core_jars_suffix=-testdex
+    core_locations_dir=/system/framework
+  fi
+  if [[ $core_jars_suffix != "" ]]; then
+    boot_separator=""
+    for boot_module in ${core_jars_list}; do
+      DEX_FILENAME="$boot_module$core_jars_suffix.jar"
+      DEX2OAT_BCP+="$boot_separator$ANDROID_ROOT/framework/${DEX_FILENAME}"
+      DEX2OAT_BCP_LOCS+="$boot_separator$core_locations_dir/${DEX_FILENAME}"
+      boot_separator=":"
+    done
+    if [ "$VERBOSE" = "yes" ]; then
+      echo "Using predefined -Xbootclasspath for image $DEX2OAT_BOOT_IMAGE:"
+      echo DEX2OAT_BOOT_IMAGE=$DEX2OAT_BOOT_IMAGE
+      echo DEX2OAT_BCP=$DEX2OAT_BCP
+      echo DEX2OAT_BCP_LOCS=$DEX2OAT_BCP_LOCS
+    fi
+  fi
+fi
+
+if [ "$DEX2OAT_BCP" != "" ]; then
+  EXTRA_OPTIONS+=("-Xbootclasspath:$DEX2OAT_BCP")
+  DEX2OAT_FLAGS+=("--runtime-arg" "-Xbootclasspath:$DEX2OAT_BCP")
+  if [ "$DEX2OAT_BCP_LOCS" != "" ]; then
+    EXTRA_OPTIONS+=("-Xbootclasspath-locations:$DEX2OAT_BCP_LOCS")
+    DEX2OAT_FLAGS+=("--runtime-arg" \
+                    "-Xbootclasspath-locations:$DEX2OAT_BCP_LOCS")
+  fi
+fi
+
 if [ "$PERF" != "" ]; then
   LAUNCH_WRAPPER="perf record -g --call-graph dwarf -F 10000 -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
   DEX2OAT_FLAGS+=(--generate-debug-info)
@@ -480,8 +584,10 @@
 fi
 
 if [ -x "$DEX2OAT_BINARY_PATH" ]; then
-  # Run dex2oat before launching ART to generate the oat files for the classpath.
-  run_dex2oat
+  if [ "$RUN_DEX2OAT" = "yes" ]; then
+    # Run dex2oat before launching ART to generate the oat files for the classpath.
+    run_dex2oat
+  fi
 fi
 
 # Do not continue if the dex2oat failed.
diff --git a/tools/art_verifier/Android.bp b/tools/art_verifier/Android.bp
index afd52fb..6fff27a 100644
--- a/tools/art_verifier/Android.bp
+++ b/tools/art_verifier/Android.bp
@@ -16,7 +16,10 @@
 
 art_cc_defaults {
     name: "art_verifier-defaults",
-    defaults: ["art_defaults"],
+    defaults: [
+        "art_defaults",
+        "libart_static_defaults",
+    ],
     host_supported: true,
     srcs: [
         "art_verifier.cc",
@@ -24,11 +27,8 @@
     header_libs: [
         "art_cmdlineparser_headers",
     ],
-    static_libs: art_static_dependencies + [
-        "libart",
-        "libartbase",
-        "libdexfile",
-        "libprofile",
+    static_libs: [
+        "libsigchain_dummy",
     ],
     target: {
         android: {
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index 8f412bf..0ef6c06 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -46,8 +46,8 @@
   std::string error_msg;
   if (!dex_file_loader.Open(dex_filename.c_str(),
                             dex_filename.c_str(),
-                            /* verify */ true,
-                            /* verify_checksum */ true,
+                            /* verify= */ true,
+                            /* verify_checksum= */ true,
                             &error_msg,
                             dex_files)) {
     LOG(ERROR) << error_msg;
@@ -137,7 +137,7 @@
     return kParseOk;
   }
 
-  virtual std::string GetUsage() const {
+  std::string GetUsage() const override {
     std::string usage;
 
     usage +=
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index dca209d..78df99c 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -54,7 +54,7 @@
 
 if [[ $mode == target ]]; then
   if [[ $core_jars_only == y ]]; then
-    selected_env_var=TARGET_CORE_JARS
+    selected_env_var=TARGET_TEST_CORE_JARS
   else
     selected_env_var=PRODUCT_BOOT_JARS
   fi
@@ -64,11 +64,33 @@
     echo "Error: --host does not have non-core boot jars, --core required" >&2
     exit 1
   fi
-  selected_env_var=HOST_CORE_JARS
+  selected_env_var=HOST_TEST_CORE_JARS
   intermediates_env_var=HOST_OUT_COMMON_INTERMEDIATES
 fi
 
-boot_jars_list=$(get_build_var "$selected_env_var")
+if [[ $core_jars_only == y ]]; then
+  # FIXME: The soong invocation we're using for getting the variables does not give us anything
+  # defined in Android.common_path.mk, otherwise we would just use HOST-/TARGET_TEST_CORE_JARS.
+
+  # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+  # because that's what we use for compiling the core.art image.
+  # It may contain additional modules from TEST_CORE_JARS.
+  core_jars_list="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+  core_jars_suffix=
+  if [[ $mode == target ]]; then
+    core_jars_suffix=-testdex
+  elif [[ $mode == host ]]; then
+    core_jars_suffix=-hostdex
+  fi
+  boot_jars_list=""
+  boot_separator=""
+  for boot_module in ${core_jars_list}; do
+    boot_jars_list+="${boot_separator}${boot_module}${core_jars_suffix}"
+    boot_separator=" "
+  done
+else
+  boot_jars_list=$(get_build_var "$selected_env_var")
+fi
 
 # Print only the list of boot jars.
 if [[ $print_file_path == n ]]; then
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
new file mode 100755
index 0000000..b401071
--- /dev/null
+++ b/tools/build_linux_bionic.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This will build a target using linux_bionic. It can be called with normal make
+# flags.
+#
+# TODO This runs a 'm clean' prior to building the targets in order to ensure
+# that obsolete kati files don't mess up the build.
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+  pushd .
+else
+  pushd $ANDROID_BUILD_TOP
+fi
+
+if [ ! -d art ]; then
+  echo "Script needs to be run at the root of the android tree"
+  exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+# Soong needs a bunch of variables set and will not run if they are missing.
+# The default values of these variables are only contained in make, so use
+# nothing to create the variables then remove all the other artifacts.
+
+# TODO(b/123645297) Move hiddenapi steps to soong.
+#
+# Currently hiddenapi relies on .mk to build some of its configuration files.
+# This prevents us from just cleaning using soong and forces us to do this
+# hacky workaround where we build the targets without linux_bionic and delete
+# the build-config files before going around again. If we fix this issue we can
+# change to only building 'nothing' instead.
+build/soong/soong_ui.bash --make-mode "$@"
+
+if [ $? != 0 ]; then
+  exit 1
+fi
+
+out_dir=$(get_build_var OUT_DIR)
+host_out=$(get_build_var HOST_OUT)
+
+# TODO(b/31559095) Figure out a better way to do this.
+#
+# There is no good way to force soong to generate host-bionic builds currently
+# so this is a hacky workaround.
+tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+
+cat $out_dir/soong/soong.variables > ${tmp_soong_var}
+
+# See comment above about b/123645297 for why we cannot just do m clean. Clear
+# out all files except for intermediates and installed files.
+find $out_dir/ -maxdepth 1 -mindepth 1 \
+               -not -name soong        \
+               -not -name host         \
+               -not -name target | xargs -I '{}' rm -rf '{}'
+find $out_dir/soong/ -maxdepth 1 -mindepth 1   \
+                     -not -name .intermediates \
+                     -not -name host           \
+                     -not -name target | xargs -I '{}' rm -rf '{}'
+
+python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
+import json
+import sys
+x = json.load(open(sys.argv[1]))
+x['Allow_missing_dependencies'] = True
+x['HostArch'] = 'x86_64'
+x['CrossHost'] = 'linux_bionic'
+x['CrossHostArch'] = 'x86_64'
+if 'CrossHostSecondaryArch' in x:
+  del x['CrossHostSecondaryArch']
+json.dump(x, open(sys.argv[2], mode='w'))
+END
+
+rm $tmp_soong_var
+
+build/soong/soong_ui.bash --make-mode --skip-make $@
diff --git a/tools/build_linux_bionic_tests.sh b/tools/build_linux_bionic_tests.sh
new file mode 100755
index 0000000..c532c90
--- /dev/null
+++ b/tools/build_linux_bionic_tests.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+  pushd .
+else
+  pushd $ANDROID_BUILD_TOP
+fi
+
+if [ ! -d art ]; then
+  echo "Script needs to be run at the root of the android tree"
+  exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+
+out_dir=$(get_build_var OUT_DIR)
+host_out=$(get_build_var HOST_OUT)
+
+# TODO(b/31559095) Figure out a better way to do this.
+#
+# There is no good way to force soong to generate host-bionic builds currently
+# so this is a hacky workaround.
+
+# First build all the targets still in .mk files (also build normal glibc host
+# targets so we know what's needed to run the tests).
+build/soong/soong_ui.bash --make-mode "$@" test-art-host-run-test-dependencies build-art-host-tests
+if [ $? != 0 ]; then
+  exit 1
+fi
+
+tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+
+echo "Saving soong.variables to " $tmp_soong_var
+cat $out_dir/soong/soong.variables > ${tmp_soong_var}
+python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
+import json
+import sys
+x = json.load(open(sys.argv[1]))
+x['Allow_missing_dependencies'] = True
+x['HostArch'] = 'x86_64'
+x['CrossHost'] = 'linux_bionic'
+x['CrossHostArch'] = 'x86_64'
+if 'CrossHostSecondaryArch' in x:
+  del x['CrossHostSecondaryArch']
+json.dump(x, open(sys.argv[2], mode='w'))
+END
+if [ $? != 0 ]; then
+  mv $tmp_soong_var $out_dir/soong/soong.variables
+  exit 2
+fi
+
+soong_out=$out_dir/soong/host/linux_bionic-x86
+declare -a bionic_targets
+# These are the binaries actually used in tests. Since some of the files are
+# java targets or 32 bit we cannot just do the same find for the bin files.
+#
+# We look at what the earlier build generated to figure out what to ask soong to
+# build since we cannot use the .mk defined phony targets.
+bionic_targets=(
+  $soong_out/bin/dalvikvm
+  $soong_out/bin/dalvikvm64
+  $soong_out/bin/dex2oat
+  $soong_out/bin/dex2oatd
+  $soong_out/bin/profman
+  $soong_out/bin/profmand
+  $soong_out/bin/hiddenapi
+  $soong_out/bin/hprof-conv
+  $soong_out/bin/timeout_dumper
+  $(find $host_out/apex -type f | sed "s:$host_out:$soong_out:g")
+  $(find $host_out/lib64 -type f | sed "s:$host_out:$soong_out:g")
+  $(find $host_out/nativetest64 -type f | sed "s:$host_out:$soong_out:g"))
+
+echo building ${bionic_targets[*]}
+
+build/soong/soong_ui.bash --make-mode --skip-make "$@" ${bionic_targets[*]}
+ret=$?
+
+mv $tmp_soong_var $out_dir/soong/soong.variables
+
+# Having built with host-bionic confuses soong somewhat by making it think the
+# linux_bionic targets are needed for art phony targets like
+# test-art-host-run-test-dependencies. To work around this blow away all
+# ninja files in OUT_DIR. The build system is smart enough to not need to
+# rebuild stuff so this should be fine.
+rm -f $OUT_DIR/*.ninja $OUT_DIR/soong/*.ninja
+
+popd
+
+exit $ret
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 8305051..6be243a 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -61,26 +61,46 @@
 done
 
 # Allow to build successfully in master-art.
-extra_args=SOONG_ALLOW_MISSING_DEPENDENCIES=true
+extra_args="SOONG_ALLOW_MISSING_DEPENDENCIES=true TEMPORARY_DISABLE_PATH_RESTRICTIONS=true"
 
 if [[ $mode == "host" ]]; then
   make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets"
   make_command+=" dx-tests"
   mode_suffix="-host"
 elif [[ $mode == "target" ]]; then
-  if [[ -z "$TARGET_PRODUCT" ]]; then
-    echo 'TARGET_PRODUCT environment variable is empty; did you forget to run `lunch`?'
+  if [[ -z "${ANDROID_PRODUCT_OUT}" ]]; then
+    echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
     exit 1
   fi
   make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
-  make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh"
+  make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh unzip"
   make_command+=" debuggerd su"
-  make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
-  make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
+  make_command+=" libstdc++ "
+  make_command+=" ${ANDROID_PRODUCT_OUT#"${ANDROID_BUILD_TOP}/"}/system/etc/public.libraries.txt"
+  make_command+=" art-bionic-files"
   if [[ -n "$ART_TEST_CHROOT" ]]; then
     # These targets are needed for the chroot environment.
     make_command+=" crash_dump event-log-tags"
   fi
+  # Build the Debug Runtime APEX (which is a superset of the Release Runtime APEX).
+  make_command+=" com.android.runtime.debug"
+  # Build the bootstrap Bionic libraries (libc, libdl, libm). These are required
+  # as the "main" libc, libdl, and libm have moved to the Runtime APEX. This is
+  # a temporary change needed until both the ART Buildbot and Golem fully
+  # support the Runtime APEX.
+  #
+  # TODO(b/121117762): Remove this when the ART Buildbot and Golem have full
+  # support for the Runtime APEX.
+  make_command+=" libc.bootstrap libdl.bootstrap libm.bootstrap"
+  # Create a copy of the ICU .dat prebuilt files in /system/etc/icu on target,
+  # so that it can be found even if the Runtime APEX is not available, by setting
+  # the environment variable `ART_TEST_ANDROID_RUNTIME_ROOT` to "/system" on
+  # device. This is a temporary change needed until both the ART Buildbot and
+  # Golem fully support the Runtime APEX.
+  #
+  # TODO(b/121117762): Remove this when the ART Buildbot and Golem have full
+  # support for the Runtime APEX.
+  make_command+=" icu-data-art-test"
   mode_suffix="-target"
 fi
 
@@ -92,4 +112,5 @@
 
 
 echo "Executing $make_command"
+# Disable path restrictions to enable luci builds using vpython.
 bash -c "$make_command"
diff --git a/tools/buildbot-sync.sh b/tools/buildbot-sync.sh
new file mode 100755
index 0000000..01b3c0d
--- /dev/null
+++ b/tools/buildbot-sync.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+adb wait-for-device
+
+if [[ -z "${ANDROID_PRODUCT_OUT}" ]]; then
+  echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
+  exit 1
+fi
+
+if [[ -z "${ART_TEST_CHROOT}" ]]; then
+  echo 'ART_TEST_CHROOT environment variable is empty'
+  exit 1
+fi
+
+adb push ${ANDROID_PRODUCT_OUT}/system ${ART_TEST_CHROOT}/
+adb push ${ANDROID_PRODUCT_OUT}/data ${ART_TEST_CHROOT}/
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java
new file mode 100644
index 0000000..1dd74dd
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.class2greylist;
+
+import java.util.Formatter;
+import java.util.Locale;
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+
+/**
+ * Encapsulates context for a single annotation on a class.
+ */
+public class AnnotatedClassContext extends AnnotationContext {
+
+    public final String signatureFormatString;
+
+    public AnnotatedClassContext(
+            Status status,
+            JavaClass definingClass,
+            String signatureFormatString) {
+        super(status, definingClass);
+        this.signatureFormatString = signatureFormatString;
+    }
+
+    @Override
+    public String getMemberDescriptor() {
+        return String.format(Locale.US, signatureFormatString, getClassDescriptor());
+    }
+
+    @Override
+    public void reportError(String message, Object... args) {
+        Formatter error = new Formatter();
+        error
+            .format("%s: %s: ", definingClass.getSourceFileName(), definingClass.getClassName())
+            .format(Locale.US, message, args);
+
+        status.error(error.toString());
+    }
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java
new file mode 100644
index 0000000..4802788
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.class2greylist;
+
+import java.util.Formatter;
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+
+import java.util.Locale;
+
+/**
+ * Encapsulates context for a single annotation on a class member.
+ */
+public class AnnotatedMemberContext extends AnnotationContext {
+
+    public final FieldOrMethod member;
+    public final String signatureFormatString;
+
+    public AnnotatedMemberContext(
+        Status status,
+        JavaClass definingClass,
+        FieldOrMethod member,
+        String signatureFormatString) {
+        super(status, definingClass);
+        this.member = member;
+        this.signatureFormatString = signatureFormatString;
+    }
+
+    @Override
+    public String getMemberDescriptor() {
+        return String.format(Locale.US, signatureFormatString,
+            getClassDescriptor(), member.getName(), member.getSignature());
+    }
+
+    @Override
+    public void reportError(String message, Object... args) {
+        Formatter error = new Formatter();
+        error
+            .format("%s: %s.%s: ", definingClass.getSourceFileName(),
+                definingClass.getClassName(), member.getName())
+            .format(Locale.US, message, args);
+
+        status.error(error.toString());
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationConsumer.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationConsumer.java
new file mode 100644
index 0000000..0f5f413
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationConsumer.java
@@ -0,0 +1,18 @@
+package com.android.class2greylist;
+
+import java.util.Map;
+import java.util.Set;
+
+public interface AnnotationConsumer {
+    /**
+     * Handle a parsed annotation for a class member.
+     *
+     * @param apiSignature Signature of the class member.
+     * @param annotationProperties Map of stringified properties of this annotation.
+     * @param parsedFlags Array of flags parsed from the annotation for this member.
+     */
+    public void consume(String apiSignature, Map<String, String> annotationProperties,
+            Set<String> parsedFlags);
+
+    public void close();
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
index eb54a33..73b74a9 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
@@ -1,66 +1,51 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.android.class2greylist;
 
 import org.apache.bcel.Const;
-import org.apache.bcel.classfile.FieldOrMethod;
 import org.apache.bcel.classfile.JavaClass;
 
-import java.util.Locale;
-
 /**
- * Encapsulates context for a single annotation on a class member.
  */
-public class AnnotationContext {
+public abstract class AnnotationContext {
 
-    public final Status status;
-    public final FieldOrMethod member;
-    public final JavaClass definingClass;
-    public final String signatureFormatString;
+  public final Status status;
+  public final JavaClass definingClass;
 
-    public AnnotationContext(
-            Status status,
-            FieldOrMethod member,
-            JavaClass definingClass,
-            String signatureFormatString) {
-        this.status = status;
-        this.member = member;
-        this.definingClass = definingClass;
-        this.signatureFormatString = signatureFormatString;
-    }
+  public AnnotationContext(Status status, JavaClass definingClass) {
+    this.status = status;
+    this.definingClass = definingClass;
+  }
 
-    /**
-     * @return the full descriptor of enclosing class.
-     */
-    public String getClassDescriptor() {
-        // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
-        // the original class name from the constant pool.
-        return definingClass.getConstantPool().getConstantString(
-                definingClass.getClassNameIndex(), Const.CONSTANT_Class);
-    }
+  public String getClassDescriptor() {
+      // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
+      // the original class name from the constant pool.
+      return definingClass.getConstantPool().getConstantString(
+              definingClass.getClassNameIndex(), Const.CONSTANT_Class);
+  }
 
-    /**
-     * @return the full descriptor of this member, in the format expected in
-     * the greylist.
-     */
-    public String getMemberDescriptor() {
-        return String.format(Locale.US, signatureFormatString,
-                getClassDescriptor(), member.getName(), member.getSignature());
-    }
+  /**
+   * @return the full descriptor of this member, in the format expected in
+   * the greylist.
+   */
+  public abstract String getMemberDescriptor();
 
-    /**
-     * Report an error in this context. The final error message will include
-     * the class and member names, and the source file name.
-     */
-    public void reportError(String message, Object... args) {
-        StringBuilder error = new StringBuilder();
-        error.append(definingClass.getSourceFileName())
-                .append(": ")
-                .append(definingClass.getClassName())
-                .append(".")
-                .append(member.getName())
-                .append(": ")
-                .append(String.format(Locale.US, message, args));
-
-        status.error(error.toString());
-    }
-
+  /**
+   * Report an error in this context. The final error message will include
+   * the class and member names, and the source file name.
+   */
+  public abstract void reportError(String message, Object... args);
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java
index 92d2ab6..ba1f583 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java
@@ -1,11 +1,27 @@
 package com.android.class2greylist;
 
+import java.util.Map;
+import java.util.HashMap;
+
 import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ElementValuePair;
+
 
 /**
- * Interface for an annotation handler, which handle individual annotations on
+ * Base class for an annotation handler, which handle individual annotations on
  * class members.
  */
-public interface AnnotationHandler {
-    void handleAnnotation(AnnotationEntry annotation, AnnotationContext context);
+public abstract class AnnotationHandler {
+    abstract void handleAnnotation(AnnotationEntry annotation, AnnotationContext context);
+
+    protected Map<String, String> stringifyAnnotationProperties(AnnotationEntry annotation) {
+        Map<String, String> content = new HashMap<String, String>();
+
+        // Stringify all annotation properties.
+        for (ElementValuePair prop : annotation.getElementValuePairs()) {
+            content.put(prop.getNameString(), prop.getValue().stringifyValue());
+        }
+
+        return content;
+    }
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java
new file mode 100644
index 0000000..aacd963
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java
@@ -0,0 +1,56 @@
+package com.android.class2greylist;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class AnnotationPropertyWriter implements AnnotationConsumer {
+
+    private final PrintStream mOutput;
+    private final List<Map<String, String>> mContents;
+    private final Set<String> mColumns;
+
+    public AnnotationPropertyWriter(String csvFile) throws FileNotFoundException {
+        mOutput = new PrintStream(new FileOutputStream(new File(csvFile)));
+        mContents = new ArrayList<>();
+        mColumns = new HashSet<>();
+    }
+
+    public void consume(String apiSignature, Map<String, String> annotationProperties,
+            Set<String> parsedFlags) {
+        // Clone properties map.
+        Map<String, String> contents = new HashMap(annotationProperties);
+
+        // Append the member signature.
+        contents.put("signature", apiSignature);
+
+        // Store data.
+        mColumns.addAll(contents.keySet());
+        mContents.add(contents);
+    }
+
+    public void close() {
+        // Sort columns by name and print header row.
+        List<String> columns = new ArrayList<>(mColumns);
+        columns.sort(Comparator.naturalOrder());
+        mOutput.println(columns.stream().collect(Collectors.joining(",")));
+
+        // Sort contents according to columns and print.
+        for (Map<String, String> row : mContents) {
+            mOutput.println(columns.stream().map(column -> row.getOrDefault(column, ""))
+                    .collect(Collectors.joining(",")));
+        }
+
+        // Close output.
+        mOutput.close();
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index b805b30..3a58cf1 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -55,6 +55,10 @@
 
     public void visit() {
         mStatus.debug("Visit class %s", mClass.getClassName());
+        AnnotationContext context = new AnnotatedClassContext(mStatus, mClass, "L%s;");
+        AnnotationEntry[] annotationEntries = mClass.getAnnotationEntries();
+        handleAnnotations(context, annotationEntries);
+
         mDescendingVisitor.visit();
     }
 
@@ -70,13 +74,22 @@
 
     private void visitMember(FieldOrMethod member, String signatureFormatString) {
         mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
-        AnnotationContext context = new AnnotationContext(mStatus, member,
-                (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
-        for (AnnotationEntry a : member.getAnnotationEntries()) {
+        AnnotationContext context = new AnnotatedMemberContext(mStatus,
+            (JavaClass) mDescendingVisitor.predecessor(), member,
+            signatureFormatString);
+        AnnotationEntry[] annotationEntries = member.getAnnotationEntries();
+        handleAnnotations(context, annotationEntries);
+    }
+
+    private void handleAnnotations(AnnotationContext context, AnnotationEntry[] annotationEntries) {
+        for (AnnotationEntry a : annotationEntries) {
             if (mAnnotationHandlers.containsKey(a.getAnnotationType())) {
                 mStatus.debug("Member has annotation %s for which we have a handler",
                         a.getAnnotationType());
                 mAnnotationHandlers.get(a.getAnnotationType()).handleAnnotation(a, context);
+            } else {
+                mStatus.debug("Member has annotation %s for which we do not have a handler",
+                    a.getAnnotationType());
             }
         }
     }
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 9262076..9c4e57e 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -17,7 +17,9 @@
 package com.android.class2greylist;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import com.google.common.io.Files;
@@ -36,9 +38,9 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * Build time tool for extracting a list of members from jar files that have the @UsedByApps
@@ -46,39 +48,46 @@
  */
 public class Class2Greylist {
 
-    private static final String GREYLIST_ANNOTATION = "Landroid/annotation/UnsupportedAppUsage;";
+    private static final Set<String> GREYLIST_ANNOTATIONS =
+            ImmutableSet.of(
+                    "android.annotation.UnsupportedAppUsage",
+                    "dalvik.annotation.compat.UnsupportedAppUsage");
     private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
 
-    private final Status mStatus;
-    private final String mPublicApiListFile;
-    private final String[] mPerSdkOutputFiles;
-    private final String mWhitelistFile;
-    private final String[] mJarFiles;
-    private final GreylistConsumer mOutput;
-    private final Set<Integer> mAllowedSdkVersions;
-    private final Set<String> mPublicApis;
+    public static final String FLAG_WHITELIST = "whitelist";
+    public static final String FLAG_GREYLIST = "greylist";
+    public static final String FLAG_BLACKLIST = "blacklist";
+    public static final String FLAG_GREYLIST_MAX_O = "greylist-max-o";
+    public static final String FLAG_GREYLIST_MAX_P = "greylist-max-p";
 
+    private static final Map<Integer, String> TARGET_SDK_TO_LIST_MAP;
+    static {
+        Map<Integer, String> map = new HashMap<>();
+        map.put(null, FLAG_GREYLIST);
+        map.put(26, FLAG_GREYLIST_MAX_O);
+        map.put(28, FLAG_GREYLIST_MAX_P);
+        TARGET_SDK_TO_LIST_MAP = Collections.unmodifiableMap(map);
+    }
+
+    private final Status mStatus;
+    private final String mCsvFlagsFile;
+    private final String mCsvMetadataFile;
+    private final String[] mJarFiles;
+    private final AnnotationConsumer mOutput;
+    private final Set<String> mPublicApis;
 
     public static void main(String[] args) {
         Options options = new Options();
         options.addOption(OptionBuilder
-                .withLongOpt("public-api-list")
+                .withLongOpt("stub-api-flags")
                 .hasArgs(1)
-                .withDescription("Public API list file. Used to de-dupe bridge methods.")
-                .create("p"));
+                .withDescription("CSV file with API flags generated from public API stubs. " +
+                        "Used to de-dupe bridge methods.")
+                .create("s"));
         options.addOption(OptionBuilder
-                .withLongOpt("write-greylist")
-                .hasArgs()
-                .withDescription(
-                        "Specify file to write greylist to. Can be specified multiple times. " +
-                        "Format is either just a filename, or \"int:filename\". If an integer is " +
-                        "given, members with a matching maxTargetSdk are written to the file; if " +
-                        "no integer is given, members with no maxTargetSdk are written.")
-                .create("g"));
-        options.addOption(OptionBuilder
-                .withLongOpt("write-whitelist")
+                .withLongOpt("write-flags-csv")
                 .hasArgs(1)
-                .withDescription("Specify file to write whitelist to.")
+                .withDescription("Specify file to write hiddenapi flags to.")
                 .create('w'));
         options.addOption(OptionBuilder
                 .withLongOpt("debug")
@@ -92,10 +101,17 @@
                 .hasArgs(0)
                 .create('m'));
         options.addOption(OptionBuilder
+                .withLongOpt("write-metadata-csv")
+                .hasArgs(1)
+                .withDescription("Specify a file to write API metadata to. This is a CSV file " +
+                        "containing any annotation properties for all members. Do not use in " +
+                        "conjunction with --write-flags-csv.")
+                .create('c'));
+        options.addOption(OptionBuilder
                 .withLongOpt("help")
                 .hasArgs(0)
                 .withDescription("Show this help")
-                .create("h"));
+                .create('h'));
 
         CommandLineParser parser = new GnuParser();
         CommandLine cmd;
@@ -126,9 +142,9 @@
             try {
                 Class2Greylist c2gl = new Class2Greylist(
                         status,
-                        cmd.getOptionValue('p', null),
-                        cmd.getOptionValues('g'),
+                        cmd.getOptionValue('s', null),
                         cmd.getOptionValue('w', null),
+                        cmd.getOptionValue('c', null),
                         jarFiles);
                 c2gl.main();
             } catch (IOException e) {
@@ -145,43 +161,71 @@
     }
 
     @VisibleForTesting
-    Class2Greylist(Status status, String publicApiListFile, String[] perSdkLevelOutputFiles,
-            String whitelistOutputFile, String[] jarFiles) throws IOException {
+    Class2Greylist(Status status, String stubApiFlagsFile, String csvFlagsFile,
+            String csvMetadataFile, String[] jarFiles)
+            throws IOException {
         mStatus = status;
-        mPublicApiListFile = publicApiListFile;
-        mPerSdkOutputFiles = perSdkLevelOutputFiles;
-        mWhitelistFile = whitelistOutputFile;
+        mCsvFlagsFile = csvFlagsFile;
+        mCsvMetadataFile = csvMetadataFile;
         mJarFiles = jarFiles;
-        if (mPerSdkOutputFiles != null) {
-            Map<Integer, String> outputFiles = readGreylistMap(mStatus, mPerSdkOutputFiles);
-            mOutput = new FileWritingGreylistConsumer(mStatus, outputFiles, mWhitelistFile);
-            mAllowedSdkVersions = outputFiles.keySet();
+        if (mCsvMetadataFile != null) {
+            mOutput = new AnnotationPropertyWriter(mCsvMetadataFile);
         } else {
-            // TODO remove this once per-SDK greylist support integrated into the build.
-            // Right now, mPerSdkOutputFiles is always null as the build never passes the
-            // corresponding command lind flags. Once the build is updated, can remove this.
-            mOutput = new SystemOutGreylistConsumer();
-            mAllowedSdkVersions = new HashSet<>(Arrays.asList(null, 26, 28));
+            mOutput = new HiddenapiFlagsWriter(mCsvFlagsFile);
         }
 
-        if (mPublicApiListFile != null) {
-            mPublicApis = Sets.newHashSet(
-                    Files.readLines(new File(mPublicApiListFile), Charset.forName("UTF-8")));
+        if (stubApiFlagsFile != null) {
+            mPublicApis =
+                    Files.readLines(new File(stubApiFlagsFile), Charset.forName("UTF-8")).stream()
+                        .map(s -> Splitter.on(",").splitToList(s))
+                        .filter(s -> s.contains(FLAG_WHITELIST))
+                        .map(s -> s.get(0))
+                        .collect(Collectors.toSet());
         } else {
             mPublicApis = Collections.emptySet();
         }
     }
 
     private Map<String, AnnotationHandler> createAnnotationHandlers() {
-        return ImmutableMap.<String, AnnotationHandler>builder()
-                .put(GreylistAnnotationHandler.ANNOTATION_NAME,
-                        new GreylistAnnotationHandler(
-                                mStatus, mOutput, mPublicApis, mAllowedSdkVersions))
-                .put(CovariantReturnTypeHandler.ANNOTATION_NAME,
-                        new CovariantReturnTypeHandler(mOutput, mPublicApis))
-                .put(CovariantReturnTypeMultiHandler.ANNOTATION_NAME,
-                        new CovariantReturnTypeMultiHandler(mOutput, mPublicApis))
-                .build();
+        Builder<String, AnnotationHandler> builder = ImmutableMap.builder();
+        UnsupportedAppUsageAnnotationHandler greylistAnnotationHandler =
+                new UnsupportedAppUsageAnnotationHandler(
+                    mStatus, mOutput, mPublicApis, TARGET_SDK_TO_LIST_MAP);
+        GREYLIST_ANNOTATIONS
+            .forEach(a -> addRepeatedAnnotationHandlers(
+                builder,
+                classNameToSignature(a),
+                classNameToSignature(a + "$Container"),
+                greylistAnnotationHandler));
+
+        CovariantReturnTypeHandler covariantReturnTypeHandler = new CovariantReturnTypeHandler(
+            mOutput, mPublicApis, FLAG_WHITELIST);
+
+        return addRepeatedAnnotationHandlers(builder, CovariantReturnTypeHandler.ANNOTATION_NAME,
+            CovariantReturnTypeHandler.REPEATED_ANNOTATION_NAME, covariantReturnTypeHandler)
+            .build();
+    }
+
+    private String classNameToSignature(String a) {
+        return "L" + a.replace('.', '/') + ";";
+    }
+
+    /**
+     * Add a handler for an annotation as well as a handler for the container annotation that is
+     * used when the annotation is repeated.
+     *
+     * @param builder the builder for the map to which the handlers will be added.
+     * @param annotationName the name of the annotation.
+     * @param containerAnnotationName the name of the annotation container.
+     * @param handler the handler for the annotation.
+     */
+    private static Builder<String, AnnotationHandler> addRepeatedAnnotationHandlers(
+        Builder<String, AnnotationHandler> builder,
+        String annotationName, String containerAnnotationName,
+        AnnotationHandler handler) {
+        return builder
+            .put(annotationName, handler)
+            .put(containerAnnotationName, new RepeatedAnnotationHandler(annotationName, handler));
     }
 
     private void main() throws IOException {
@@ -200,38 +244,6 @@
         mOutput.close();
     }
 
-    @VisibleForTesting
-    static Map<Integer, String> readGreylistMap(Status status, String[] argValues) {
-        Map<Integer, String> map = new HashMap<>();
-        for (String sdkFile : argValues) {
-            Integer maxTargetSdk = null;
-            String filename;
-            int colonPos = sdkFile.indexOf(':');
-            if (colonPos != -1) {
-                try {
-                    maxTargetSdk = Integer.valueOf(sdkFile.substring(0, colonPos));
-                } catch (NumberFormatException nfe) {
-                    status.error("Not a valid integer: %s from argument value '%s'",
-                            sdkFile.substring(0, colonPos), sdkFile);
-                }
-                filename = sdkFile.substring(colonPos + 1);
-                if (filename.length() == 0) {
-                    status.error("Not a valid file name: %s from argument value '%s'",
-                            filename, sdkFile);
-                }
-            } else {
-                maxTargetSdk = null;
-                filename = sdkFile;
-            }
-            if (map.containsKey(maxTargetSdk)) {
-                status.error("Multiple output files for maxTargetSdk %s", maxTargetSdk);
-            } else {
-                map.put(maxTargetSdk, filename);
-            }
-        }
-        return map;
-    }
-
     private static void dumpAllMembers(Status status, String[] jarFiles) {
         for (String jarFile : jarFiles) {
             status.debug("Processing jar file %s", jarFile);
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
index afd15b4..eb2e42d 100644
--- a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
@@ -1,6 +1,7 @@
 package com.android.class2greylist;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
 
 import org.apache.bcel.classfile.AnnotationEntry;
 import org.apache.bcel.classfile.ElementValuePair;
@@ -20,23 +21,35 @@
  * <p>Methods are also validated against the public API list, to assert that
  * the annotated method is already a public API.
  */
-public class CovariantReturnTypeHandler implements AnnotationHandler {
+public class CovariantReturnTypeHandler extends AnnotationHandler {
 
     private static final String SHORT_NAME = "CovariantReturnType";
     public static final String ANNOTATION_NAME = "Ldalvik/annotation/codegen/CovariantReturnType;";
+    public static final String REPEATED_ANNOTATION_NAME =
+        "Ldalvik/annotation/codegen/CovariantReturnType$CovariantReturnTypes;";
 
     private static final String RETURN_TYPE = "returnType";
 
-    private final GreylistConsumer mConsumer;
+    private final AnnotationConsumer mAnnotationConsumer;
     private final Set<String> mPublicApis;
+    private final String mHiddenapiFlag;
 
-    public CovariantReturnTypeHandler(GreylistConsumer consumer, Set<String> publicApis) {
-        mConsumer = consumer;
+    public CovariantReturnTypeHandler(AnnotationConsumer consumer, Set<String> publicApis,
+            String hiddenapiFlag) {
+        mAnnotationConsumer = consumer;
         mPublicApis = publicApis;
+        mHiddenapiFlag = hiddenapiFlag;
     }
 
     @Override
     public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+        if (context instanceof AnnotatedClassContext) {
+            return;
+        }
+        handleAnnotation(annotation, (AnnotatedMemberContext) context);
+    }
+
+    private void handleAnnotation(AnnotationEntry annotation, AnnotatedMemberContext context) {
         // Verify that the annotation has been applied to what we expect, and
         // has the right form. Note, this should not strictly be necessary, as
         // the annotation has a target of just 'method' and the property
@@ -74,7 +87,9 @@
                     signature, SHORT_NAME);
             return;
         }
-        mConsumer.whitelistEntry(signature);
+
+        mAnnotationConsumer.consume(signature, stringifyAnnotationProperties(annotation),
+                ImmutableSet.of(mHiddenapiFlag));
     }
 
     private String findReturnType(AnnotationEntry a) {
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
deleted file mode 100644
index bd0bf79..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
+++ /dev/null
@@ -1,72 +0,0 @@
-package com.android.class2greylist;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import org.apache.bcel.classfile.AnnotationElementValue;
-import org.apache.bcel.classfile.AnnotationEntry;
-import org.apache.bcel.classfile.ArrayElementValue;
-import org.apache.bcel.classfile.ElementValue;
-import org.apache.bcel.classfile.ElementValuePair;
-
-import java.util.Set;
-
-/**
- * Handles {@code CovariantReturnType$CovariantReturnTypes} annotations, which
- * are generated by the compiler when multiple {@code CovariantReturnType}
- * annotations appear on a single method.
- *
- * <p>The enclosed annotations are passed to {@link CovariantReturnTypeHandler}.
- */
-public class CovariantReturnTypeMultiHandler implements AnnotationHandler {
-
-    public static final String ANNOTATION_NAME =
-            "Ldalvik/annotation/codegen/CovariantReturnType$CovariantReturnTypes;";
-
-    private static final String VALUE = "value";
-
-    private final CovariantReturnTypeHandler mWrappedHandler;
-    private final String mInnerAnnotationName;
-
-    public CovariantReturnTypeMultiHandler(GreylistConsumer consumer, Set<String> publicApis) {
-        this(consumer, publicApis, CovariantReturnTypeHandler.ANNOTATION_NAME);
-    }
-
-    @VisibleForTesting
-    public CovariantReturnTypeMultiHandler(GreylistConsumer consumer, Set<String> publicApis,
-            String innerAnnotationName) {
-        mWrappedHandler = new CovariantReturnTypeHandler(consumer, publicApis);
-        mInnerAnnotationName = innerAnnotationName;
-    }
-
-    @Override
-    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
-        // Verify that the annotation has the form we expect
-        ElementValuePair value = findValue(annotation);
-        if (value == null) {
-            context.reportError("No value found on CovariantReturnType$CovariantReturnTypes");
-            return;
-        }
-        Preconditions.checkArgument(value.getValue() instanceof ArrayElementValue);
-        ArrayElementValue array = (ArrayElementValue) value.getValue();
-
-        // call wrapped handler on each enclosed annotation:
-        for (ElementValue v : array.getElementValuesArray()) {
-            Preconditions.checkArgument(v instanceof AnnotationElementValue);
-            AnnotationElementValue aev = (AnnotationElementValue) v;
-            Preconditions.checkArgument(
-                    aev.getAnnotationEntry().getAnnotationType().equals(mInnerAnnotationName));
-            mWrappedHandler.handleAnnotation(aev.getAnnotationEntry(), context);
-        }
-    }
-
-    private ElementValuePair findValue(AnnotationEntry a) {
-        for (ElementValuePair property : a.getElementValuePairs()) {
-            if (property.getNameString().equals(VALUE)) {
-                return property;
-            }
-        }
-        // not found
-        return null;
-    }
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
deleted file mode 100644
index 9f33467..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
+++ /dev/null
@@ -1,66 +0,0 @@
-package com.android.class2greylist;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.PrintStream;
-import java.util.HashMap;
-import java.util.Map;
-
-public class FileWritingGreylistConsumer implements GreylistConsumer {
-
-    private final Status mStatus;
-    private final Map<Integer, PrintStream> mSdkToPrintStreamMap;
-    private final PrintStream mWhitelistStream;
-
-    private static PrintStream openFile(String filename) throws FileNotFoundException {
-        if (filename == null) {
-            return null;
-        }
-        return new PrintStream(new FileOutputStream(new File(filename)));
-    }
-
-    private static Map<Integer, PrintStream> openFiles(
-            Map<Integer, String> filenames) throws FileNotFoundException {
-        Map<Integer, PrintStream> streams = new HashMap<>();
-        for (Map.Entry<Integer, String> entry : filenames.entrySet()) {
-            streams.put(entry.getKey(), openFile(entry.getValue()));
-        }
-        return streams;
-    }
-
-    public FileWritingGreylistConsumer(Status status, Map<Integer, String> sdkToFilenameMap,
-            String whitelistFile) throws FileNotFoundException {
-        mStatus = status;
-        mSdkToPrintStreamMap = openFiles(sdkToFilenameMap);
-        mWhitelistStream = openFile(whitelistFile);
-    }
-
-    @Override
-    public void greylistEntry(String signature, Integer maxTargetSdk) {
-        PrintStream p = mSdkToPrintStreamMap.get(maxTargetSdk);
-        if (p == null) {
-            mStatus.error("No output file for signature %s with maxTargetSdk of %d", signature,
-                    maxTargetSdk == null ? "<absent>" : maxTargetSdk.toString());
-            return;
-        }
-        p.println(signature);
-    }
-
-    @Override
-    public void whitelistEntry(String signature) {
-        if (mWhitelistStream != null) {
-            mWhitelistStream.println(signature);
-        }
-    }
-
-    @Override
-    public void close() {
-        for (PrintStream p : mSdkToPrintStreamMap.values()) {
-            p.close();
-        }
-        if (mWhitelistStream != null) {
-            mWhitelistStream.close();
-        }
-    }
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
deleted file mode 100644
index 460f2c3..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
+++ /dev/null
@@ -1,146 +0,0 @@
-package com.android.class2greylist;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-
-import org.apache.bcel.Const;
-import org.apache.bcel.classfile.AnnotationEntry;
-import org.apache.bcel.classfile.ElementValue;
-import org.apache.bcel.classfile.ElementValuePair;
-import org.apache.bcel.classfile.FieldOrMethod;
-import org.apache.bcel.classfile.Method;
-import org.apache.bcel.classfile.SimpleElementValue;
-
-import java.util.Set;
-import java.util.function.Predicate;
-
-/**
- * Processes {@code UnsupportedAppUsage} annotations to generate greylist
- * entries.
- *
- * Any annotations with a {@link #EXPECTED_SIGNATURE} property will have their
- * generated signature verified against this, and an error will be reported if
- * it does not match. Exclusions are made for bridge methods.
- *
- * Any {@link #MAX_TARGET_SDK} properties will be validated against the given
- * set of valid values, then passed through to the greylist consumer.
- */
-public class GreylistAnnotationHandler implements AnnotationHandler {
-
-    public static final String ANNOTATION_NAME = "Landroid/annotation/UnsupportedAppUsage;";
-
-    // properties of greylist annotations:
-    private static final String EXPECTED_SIGNATURE = "expectedSignature";
-    private static final String MAX_TARGET_SDK = "maxTargetSdk";
-
-    private final Status mStatus;
-    private final Predicate<GreylistMember> mGreylistFilter;
-    private final GreylistConsumer mGreylistConsumer;
-    private final Set<Integer> mValidMaxTargetSdkValues;
-
-    /**
-     * Represents a member of a class file (a field or method).
-     */
-    @VisibleForTesting
-    public static class GreylistMember {
-
-        /**
-         * Signature of this member.
-         */
-        public final String signature;
-        /**
-         * Indicates if this is a synthetic bridge method.
-         */
-        public final boolean bridge;
-        /**
-         * Max target SDK of property this member, if it is set, else null.
-         *
-         * Note: even though the annotation itself specified a default value,
-         * that default value is not encoded into instances of the annotation
-         * in class files. So when no value is specified in source, it will
-         * result in null appearing in here.
-         */
-        public final Integer maxTargetSdk;
-
-        public GreylistMember(String signature, boolean bridge, Integer maxTargetSdk) {
-            this.signature = signature;
-            this.bridge = bridge;
-            this.maxTargetSdk = maxTargetSdk;
-        }
-    }
-
-    public GreylistAnnotationHandler(
-            Status status,
-            GreylistConsumer greylistConsumer,
-            Set<String> publicApis,
-            Set<Integer> validMaxTargetSdkValues) {
-        this(status, greylistConsumer,
-                member -> !(member.bridge && publicApis.contains(member.signature)),
-                validMaxTargetSdkValues);
-    }
-
-    @VisibleForTesting
-    public GreylistAnnotationHandler(
-            Status status,
-            GreylistConsumer greylistConsumer,
-            Predicate<GreylistMember> greylistFilter,
-            Set<Integer> validMaxTargetSdkValues) {
-        mStatus = status;
-        mGreylistConsumer = greylistConsumer;
-        mGreylistFilter = greylistFilter;
-        mValidMaxTargetSdkValues = validMaxTargetSdkValues;
-    }
-
-    @Override
-    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
-        FieldOrMethod member = context.member;
-        boolean bridge = (member instanceof Method)
-                && (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
-        if (bridge) {
-            mStatus.debug("Member is a bridge");
-        }
-        String signature = context.getMemberDescriptor();
-        Integer maxTargetSdk = null;
-        for (ElementValuePair property : annotation.getElementValuePairs()) {
-            switch (property.getNameString()) {
-                case EXPECTED_SIGNATURE:
-                    verifyExpectedSignature(context, property, signature, bridge);
-                    break;
-                case MAX_TARGET_SDK:
-                    maxTargetSdk = verifyAndGetMaxTargetSdk(context, property);
-                    break;
-            }
-        }
-        if (mGreylistFilter.test(new GreylistMember(signature, bridge, maxTargetSdk))) {
-            mGreylistConsumer.greylistEntry(signature, maxTargetSdk);
-        }
-    }
-
-    private void verifyExpectedSignature(AnnotationContext context, ElementValuePair property,
-            String signature, boolean isBridge) {
-        String expected = property.getValue().stringifyValue();
-        // Don't enforce for bridge methods; they're generated so won't match.
-        if (!isBridge && !signature.equals(expected)) {
-            context.reportError("Expected signature does not match generated:\n"
-                            + "Expected:  %s\n"
-                            + "Generated: %s", expected, signature);
-        }
-    }
-
-    private Integer verifyAndGetMaxTargetSdk(AnnotationContext context, ElementValuePair property) {
-        if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
-            context.reportError("Expected property %s to be of type int; got %d",
-                    property.getNameString(), property.getValue().getElementValueType());
-        }
-        int value = ((SimpleElementValue) property.getValue()).getValueInt();
-        if (!mValidMaxTargetSdkValues.contains(value)) {
-            context.reportError("Invalid value for %s: got %d, expected one of [%s]",
-                    property.getNameString(),
-                    value,
-                    Joiner.on(",").join(mValidMaxTargetSdkValues));
-            return null;
-        }
-        return value;
-    }
-
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
deleted file mode 100644
index fd855e8..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package com.android.class2greylist;
-
-public interface GreylistConsumer {
-    /**
-     * Handle a new greylist entry.
-     *
-     * @param signature Signature of the member.
-     * @param maxTargetSdk maxTargetSdk value from the annotation, or null if none set.
-     */
-    void greylistEntry(String signature, Integer maxTargetSdk);
-
-    /**
-     * Handle a new whitelist entry.
-     *
-     * @param signature Signature of the member.
-     */
-    void whitelistEntry(String signature);
-
-    void close();
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/HiddenapiFlagsWriter.java b/tools/class2greylist/src/com/android/class2greylist/HiddenapiFlagsWriter.java
new file mode 100644
index 0000000..54ca17c
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/HiddenapiFlagsWriter.java
@@ -0,0 +1,40 @@
+package com.android.class2greylist;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class HiddenapiFlagsWriter implements AnnotationConsumer {
+
+    private final PrintStream mOutput;
+
+    public HiddenapiFlagsWriter(String csvFile) throws FileNotFoundException {
+        mOutput = new PrintStream(new FileOutputStream(new File(csvFile)));
+    }
+
+    public void consume(String apiSignature, Map<String, String> annotationProperties,
+            Set<String> parsedFlags) {
+        if (parsedFlags.size() > 0) {
+            mOutput.println(apiSignature + "," + String.join(",", asSortedList(parsedFlags)));
+        }
+    }
+
+    public void close() {
+        mOutput.close();
+    }
+
+    private static List<String> asSortedList(Set<String> s) {
+        List<String> list = new ArrayList<>(s);
+        Collections.sort(list);
+        return list;
+    }
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
index 6677a3f..89c8bd7 100644
--- a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
@@ -40,8 +40,9 @@
     }
 
     private void visitMember(FieldOrMethod member, String signatureFormatString) {
-        AnnotationContext context = new AnnotationContext(mStatus, member,
-                (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
+        AnnotationContext context = new AnnotatedMemberContext(mStatus,
+            (JavaClass) mDescendingVisitor.predecessor(), member,
+            signatureFormatString);
         System.out.println(context.getMemberDescriptor());
     }
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java
new file mode 100644
index 0000000..61949e3
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java
@@ -0,0 +1,57 @@
+package com.android.class2greylist;
+
+import com.google.common.base.Preconditions;
+import org.apache.bcel.classfile.AnnotationElementValue;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ArrayElementValue;
+import org.apache.bcel.classfile.ElementValue;
+import org.apache.bcel.classfile.ElementValuePair;
+
+/**
+ * Handles a repeated annotation container.
+ *
+ * <p>The enclosed annotations are passed to the {@link #mWrappedHandler}.
+ */
+public class RepeatedAnnotationHandler extends AnnotationHandler {
+
+    private static final String VALUE = "value";
+
+    private final AnnotationHandler mWrappedHandler;
+    private final String mInnerAnnotationName;
+
+    RepeatedAnnotationHandler(String innerAnnotationName, AnnotationHandler wrappedHandler) {
+        mWrappedHandler = wrappedHandler;
+        mInnerAnnotationName = innerAnnotationName;
+    }
+
+    @Override
+    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+        // Verify that the annotation has the form we expect
+        ElementValuePair value = findValue(annotation);
+        if (value == null) {
+            context.reportError("No value found on %s", annotation.getAnnotationType());
+            return;
+        }
+        Preconditions.checkArgument(value.getValue() instanceof ArrayElementValue);
+        ArrayElementValue array = (ArrayElementValue) value.getValue();
+
+        // call wrapped handler on each enclosed annotation:
+        for (ElementValue v : array.getElementValuesArray()) {
+            Preconditions.checkArgument(v instanceof AnnotationElementValue);
+            AnnotationElementValue aev = (AnnotationElementValue) v;
+            Preconditions.checkArgument(
+                    aev.getAnnotationEntry().getAnnotationType().equals(mInnerAnnotationName));
+            mWrappedHandler.handleAnnotation(aev.getAnnotationEntry(), context);
+        }
+    }
+
+    private ElementValuePair findValue(AnnotationEntry a) {
+        for (ElementValuePair property : a.getElementValuePairs()) {
+            if (property.getNameString().equals(VALUE)) {
+                return property;
+            }
+        }
+        // not found
+        return null;
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
deleted file mode 100644
index ad5ad70..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package com.android.class2greylist;
-
-public class SystemOutGreylistConsumer implements GreylistConsumer {
-    @Override
-    public void greylistEntry(String signature, Integer maxTargetSdk) {
-        System.out.println(signature);
-    }
-
-    @Override
-    public void whitelistEntry(String signature) {
-        // Ignore. This class is only used when no grey/white lists are
-        // specified, so we have nowhere to write whitelist entries.
-    }
-
-    @Override
-    public void close() {
-    }
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
new file mode 100644
index 0000000..b45e1b3
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
@@ -0,0 +1,159 @@
+package com.android.class2greylist;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ElementValue;
+import org.apache.bcel.classfile.ElementValuePair;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.Method;
+import org.apache.bcel.classfile.SimpleElementValue;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Predicate;
+
+/**
+ * Processes {@code UnsupportedAppUsage} annotations to generate greylist
+ * entries.
+ *
+ * Any annotations with a {@link #EXPECTED_SIGNATURE_PROPERTY} property will have their
+ * generated signature verified against this, and an error will be reported if
+ * it does not match. Exclusions are made for bridge methods.
+ *
+ * Any {@link #MAX_TARGET_SDK_PROPERTY} properties will be validated against the given
+ * set of valid values, then passed through to the greylist consumer.
+ */
+public class UnsupportedAppUsageAnnotationHandler extends AnnotationHandler {
+
+    // properties of greylist annotations:
+    private static final String EXPECTED_SIGNATURE_PROPERTY = "expectedSignature";
+    private static final String MAX_TARGET_SDK_PROPERTY = "maxTargetSdk";
+    private static final String IMPLICIT_MEMBER_PROPERTY = "implicitMember";
+
+    // Sink for error and debug reporting.
+    private final Status mStatus;
+    // Decides which annotated members are forwarded to the consumer.
+    private final Predicate<ClassMember> mClassMemberFilter;
+    // Maps a maxTargetSdk value to the flag emitted for it; also acts as the
+    // set of valid maxTargetSdk values. A null key is looked up when the
+    // annotation carries no maxTargetSdk property.
+    private final Map<Integer, String> mSdkVersionToFlagMap;
+    // Receives (signature, annotation properties, flags) for valid annotations.
+    private final AnnotationConsumer mAnnotationConsumer;
+
+    /**
+     * Represents a member of a class file (a field or method).
+     */
+    @VisibleForTesting
+    public static class ClassMember {
+
+        /**
+         * Signature of this class member.
+         */
+        public final String signature;
+
+        /**
+         * Indicates if this is a synthetic bridge method.
+         */
+        public final boolean isBridgeMethod;
+
+        public ClassMember(String signature, boolean isBridgeMethod) {
+            this.signature = signature;
+            this.isBridgeMethod = isBridgeMethod;
+        }
+    }
+
+    /**
+     * Creates a handler whose member filter drops bridge methods whose
+     * signature is already in {@code publicApis} (the compiler-generated
+     * bridge duplicates an existing public method, so it is not consumed).
+     */
+    public UnsupportedAppUsageAnnotationHandler(Status status,
+            AnnotationConsumer annotationConsumer, Set<String> publicApis,
+            Map<Integer, String> sdkVersionToFlagMap) {
+        this(status, annotationConsumer,
+                member -> !(member.isBridgeMethod && publicApis.contains(member.signature)),
+                sdkVersionToFlagMap);
+    }
+
+    /**
+     * Creates a handler with an explicit member filter; only members matching
+     * {@code memberFilter} are passed to {@code annotationConsumer}.
+     */
+    @VisibleForTesting
+    public UnsupportedAppUsageAnnotationHandler(Status status,
+            AnnotationConsumer annotationConsumer, Predicate<ClassMember> memberFilter,
+            Map<Integer, String> sdkVersionToFlagMap) {
+        mStatus = status;
+        mAnnotationConsumer = annotationConsumer;
+        mClassMemberFilter = memberFilter;
+        mSdkVersionToFlagMap = sdkVersionToFlagMap;
+    }
+
+    /**
+     * Validates an {@code UnsupportedAppUsage} annotation and, if valid,
+     * forwards the member's signature plus the flag mapped from its
+     * maxTargetSdk value to the consumer.
+     *
+     * <p>Validation steps, each reporting an error and returning early on
+     * failure: the generated signature must match expectedSignature (bridge
+     * methods exempt); maxTargetSdk must be a primitive int and a key of the
+     * sdk-version-to-flag map; implicitMember is only legal on a class
+     * annotation, and a class annotation must have an implicitMember.
+     */
+    @Override
+    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+        // Detect compiler-generated bridge methods via the ACC_BRIDGE flag.
+        boolean isBridgeMethod = false;
+        if (context instanceof AnnotatedMemberContext) {
+            AnnotatedMemberContext memberContext = (AnnotatedMemberContext) context;
+            FieldOrMethod member = memberContext.member;
+            isBridgeMethod = (member instanceof Method) &&
+                (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+            if (isBridgeMethod) {
+                mStatus.debug("Member is a bridge method");
+            }
+        }
+
+        String signature = context.getMemberDescriptor();
+        Integer maxTargetSdk = null;
+        String implicitMemberSignature = null;
+
+        for (ElementValuePair property : annotation.getElementValuePairs()) {
+            switch (property.getNameString()) {
+                case EXPECTED_SIGNATURE_PROPERTY:
+                    String expected = property.getValue().stringifyValue();
+                    // Don't enforce for bridge methods; they're generated so won't match.
+                    if (!isBridgeMethod && !signature.equals(expected)) {
+                        context.reportError("Expected signature does not match generated:\n"
+                                        + "Expected:  %s\n"
+                                        + "Generated: %s", expected, signature);
+                        return;
+                    }
+                    break;
+                case MAX_TARGET_SDK_PROPERTY:
+                    if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
+                        context.reportError("Expected property %s to be of type int; got %d",
+                                property.getNameString(),
+                                property.getValue().getElementValueType());
+                        return;
+                    }
+
+                    maxTargetSdk = ((SimpleElementValue) property.getValue()).getValueInt();
+                    break;
+                case IMPLICIT_MEMBER_PROPERTY:
+                    // A class-level annotation names an implicit (runtime-generated)
+                    // member; rewrite the signature to point at that member.
+                    implicitMemberSignature = property.getValue().stringifyValue();
+                    if (context instanceof AnnotatedClassContext) {
+                        signature = String.format("L%s;->%s",
+                            context.getClassDescriptor(), implicitMemberSignature);
+                    } else {
+                        context.reportError(
+                            "Expected annotation with an %s property to be on a class but is on %s",
+                            IMPLICIT_MEMBER_PROPERTY,
+                            signature);
+                        return;
+                    }
+                    break;
+            }
+        }
+
+        // Class-level annotations are only meaningful with an implicit member.
+        if (context instanceof AnnotatedClassContext && implicitMemberSignature == null) {
+            context.reportError(
+                "Missing property %s on annotation on class %s",
+                IMPLICIT_MEMBER_PROPERTY,
+                signature);
+            return;
+        }
+
+        // Verify that maxTargetSdk is valid.
+        // (A null maxTargetSdk — property absent — is valid only if the map
+        // contains a null key mapping to the "no maxTargetSdk" flag.)
+        if (!mSdkVersionToFlagMap.containsKey(maxTargetSdk)) {
+            context.reportError("Invalid value for %s: got %d, expected one of [%s]",
+                    MAX_TARGET_SDK_PROPERTY,
+                    maxTargetSdk,
+                    mSdkVersionToFlagMap.keySet());
+            return;
+        }
+
+        // Consume this annotation if it matches the predicate.
+        if (mClassMemberFilter.test(new ClassMember(signature, isBridgeMethod))) {
+            mAnnotationConsumer.consume(signature, stringifyAnnotationProperties(annotation),
+                    ImmutableSet.of(mSdkVersionToFlagMap.get(maxTargetSdk)));
+        }
+    }
+}
diff --git a/tools/class2greylist/test/Android.mk b/tools/class2greylist/test/Android.mk
index 23f4156..f35e74c 100644
--- a/tools/class2greylist/test/Android.mk
+++ b/tools/class2greylist/test/Android.mk
@@ -21,7 +21,7 @@
 
 LOCAL_MODULE := class2greylisttest
 
-LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host
+LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host objenesis
 
 # tag this module as a cts test artifact
 LOCAL_COMPATIBILITY_SUITE := general-tests
@@ -29,4 +29,4 @@
 include $(BUILD_HOST_JAVA_LIBRARY)
 
 # Build the test APKs using their own makefiles
-include $(call all-makefiles-under,$(LOCAL_PATH))
\ No newline at end of file
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java
index 8f4a76f..65ebbf0 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java
@@ -36,14 +36,14 @@
     public TestName mTestName = new TestName();
 
     protected Javac mJavac;
-    protected GreylistConsumer mConsumer;
+    protected AnnotationConsumer mConsumer;
     protected Status mStatus;
 
     @Before
     public void baseSetup() throws IOException {
         System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
                 mTestName.getMethodName()));
-        mConsumer = mock(GreylistConsumer.class);
+        mConsumer = mock(AnnotationConsumer.class);
         mStatus = mock(Status.class, withSettings().verboseLogging());
         mJavac = new Javac();
     }
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
deleted file mode 100644
index cb75dd3..0000000
--- a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.class2greylist;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;
-import static org.mockito.MockitoAnnotations.initMocks;
-
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.mockito.Mock;
-
-import java.io.IOException;
-import java.util.Map;
-
-public class Class2GreylistTest {
-
-    @Mock
-    Status mStatus;
-    @Rule
-    public TestName mTestName = new TestName();
-
-    @Before
-    public void setup() throws IOException {
-        System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
-                mTestName.getMethodName()));
-        initMocks(this);
-    }
-
-    @Test
-    public void testReadGreylistMap() throws IOException {
-        Map<Integer, String> map = Class2Greylist.readGreylistMap(mStatus,
-                new String[]{"noApi", "1:apiOne", "3:apiThree"});
-        verifyZeroInteractions(mStatus);
-        assertThat(map).containsExactly(null, "noApi", 1, "apiOne", 3, "apiThree");
-    }
-
-    @Test
-    public void testReadGreylistMapDuplicate() throws IOException {
-        Class2Greylist.readGreylistMap(mStatus,
-                new String[]{"noApi", "1:apiOne", "1:anotherOne"});
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-
-    @Test
-    public void testReadGreylistMapDuplicateNoApi() {
-        Class2Greylist.readGreylistMap(mStatus,
-                new String[]{"noApi", "anotherNoApi", "1:apiOne"});
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-
-    @Test
-    public void testReadGreylistMapInvalidInt() throws IOException {
-        Class2Greylist.readGreylistMap(mStatus, new String[]{"noApi", "a:apiOne"});
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-
-    @Test
-    public void testReadGreylistMapNoFilename() throws IOException {
-        Class2Greylist.readGreylistMap(mStatus, new String[]{"noApi", "1:"});
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-}
-
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
index 10fae9b..9f924b2 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
@@ -40,6 +40,7 @@
 public class CovariantReturnTypeHandlerTest extends AnnotationHandlerTestBase {
 
     private static final String ANNOTATION = "Lannotation/Annotation;";
+    private static final String FLAG = "test-flag";
 
     @Before
     public void setup() throws IOException {
@@ -66,17 +67,19 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
                         new CovariantReturnTypeHandler(
                                 mConsumer,
-                                ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;")));
+                                ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;"),
+                                FLAG));
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
 
         assertNoErrors();
-        verify(mConsumer, times(1)).whitelistEntry(eq("La/b/Class;->method()Ljava/lang/Integer;"));
+        verify(mConsumer, times(1)).consume(
+                eq("La/b/Class;->method()Ljava/lang/Integer;"), any(), eq(ImmutableSet.of(FLAG)));
     }
 
     @Test
@@ -88,13 +91,14 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
                         new CovariantReturnTypeHandler(
                                 mConsumer,
-                                emptySet()));
+                                emptySet(),
+                                FLAG));
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
 
         verify(mStatus, atLeastOnce()).error(any(), any());
@@ -109,7 +113,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -118,7 +122,8 @@
                                 ImmutableSet.of(
                                         "La/b/Class;->method()Ljava/lang/String;",
                                         "La/b/Class;->method()Ljava/lang/Integer;"
-                                )));
+                                ),
+                                FLAG));
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
 
         verify(mStatus, atLeastOnce()).error(any(), any());
@@ -133,13 +138,14 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
                         new CovariantReturnTypeHandler(
                                 mConsumer,
-                                emptySet()));
+                                emptySet(),
+                                FLAG));
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
 
         verify(mStatus, atLeastOnce()).error(any(), any());
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
deleted file mode 100644
index 7f4ce62..0000000
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.class2greylist;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import static java.util.Collections.emptySet;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.io.IOException;
-import java.util.Map;
-
-public class CovariantReturnTypeMultiHandlerTest extends AnnotationHandlerTestBase {
-
-
-    @Before
-    public void setup() throws IOException {
-        // To keep the test simpler and more concise, we don't use the real
-        // @CovariantReturnType annotation here, but use our own @Annotation
-        // and @Annotation.Multi that have the same semantics. It doesn't have
-        // to match the real annotation, just have the same properties
-        // (returnType and value).
-        mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
-                "package annotation;",
-                "import static java.lang.annotation.RetentionPolicy.CLASS;",
-                "import java.lang.annotation.Repeatable;",
-                "import java.lang.annotation.Retention;",
-                "@Repeatable(Annotation.Multi.class)",
-                "@Retention(CLASS)",
-                "public @interface Annotation {",
-                "  Class<?> returnType();",
-                "  @Retention(CLASS)",
-                "  @interface Multi {",
-                "    Annotation[] value();",
-                "  }",
-                "}"));
-    }
-
-    @Test
-    public void testReturnTypeMulti() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Annotation;",
-                "public class Class {",
-                "  @Annotation(returnType=Integer.class)",
-                "  @Annotation(returnType=Long.class)",
-                "  public String method() {return null;}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of("Lannotation/Annotation$Multi;",
-                        new CovariantReturnTypeMultiHandler(
-                                mConsumer,
-                                ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;"),
-                                "Lannotation/Annotation;"));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> whitelist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(2)).whitelistEntry(whitelist.capture());
-        assertThat(whitelist.getAllValues()).containsExactly(
-                "La/b/Class;->method()Ljava/lang/Integer;",
-                "La/b/Class;->method()Ljava/lang/Long;"
-        );
-    }
-
-    @Test
-    public void testReturnTypeMultiNotPublicApi() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Annotation;",
-                "public class Class {",
-                "  @Annotation(returnType=Integer.class)",
-                "  @Annotation(returnType=Long.class)",
-                "  public String method() {return null;}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of("Lannotation/Annotation$Multi;",
-                        new CovariantReturnTypeMultiHandler(
-                                mConsumer,
-                                emptySet(),
-                                "Lannotation/Annotation;"));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
deleted file mode 100644
index 1a4bfb8..0000000
--- a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.class2greylist;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import static java.util.Collections.emptySet;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Sets;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Predicate;
-
-public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
-
-    private static final String ANNOTATION = "Lannotation/Anno;";
-
-    @Before
-    public void setup() throws IOException {
-        mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
-                "package annotation;",
-                "import static java.lang.annotation.RetentionPolicy.CLASS;",
-                "import java.lang.annotation.Retention;",
-                "@Retention(CLASS)",
-                "public @interface Anno {",
-                "  String expectedSignature() default \"\";",
-                "  int maxTargetSdk() default Integer.MAX_VALUE;",
-                "}"));
-    }
-
-    private GreylistAnnotationHandler createGreylistHandler(
-            Predicate<GreylistAnnotationHandler.GreylistMember> greylistFilter,
-            Set<Integer> validMaxTargetSdkValues) {
-        return new GreylistAnnotationHandler(
-                mStatus, mConsumer, greylistFilter, validMaxTargetSdkValues);
-    }
-
-    @Test
-    public void testGreylistMethod() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno",
-                "  public void method() {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
-    }
-
-    @Test
-    public void testGreylistConstructor() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno",
-                "  public Class() {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
-    }
-
-    @Test
-    public void testGreylistField() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno",
-                "  public int i;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
-    }
-
-    @Test
-    public void testGreylistMethodExpectedSignature() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(expectedSignature=\"La/b/Class;->method()V\")",
-                "  public void method() {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
-    }
-
-    @Test
-    public void testGreylistMethodExpectedSignatureWrong() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(expectedSignature=\"La/b/Class;->nomethod()V\")",
-                "  public void method() {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        verify(mStatus, times(1)).error(any(), any());
-    }
-
-    @Test
-    public void testGreylistInnerClassMethod() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  public class Inner {",
-                "    @Anno",
-                "    public void method() {}",
-                "  }",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
-    }
-
-    @Test
-    public void testMethodNotGreylisted() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "public class Class {",
-                "  public void method() {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        verify(mConsumer, never()).greylistEntry(any(String.class), any());
-    }
-
-    @Test
-    public void testMethodArgGenerics() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class<T extends String> {",
-                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
-                "  public void method(T arg) {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
-        ).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
-    }
-
-    @Test
-    public void testOverrideMethodWithBridge() throws IOException {
-        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
-                "package a.b;",
-                "abstract class Base<T> {",
-                "  protected abstract void method(T arg);",
-                "}"));
-
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class<T extends String> extends Base<T> {",
-                "  @Override",
-                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
-                "  public void method(T arg) {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        // A bridge method is generated for the above, so we expect 2 greylist entries.
-        verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getAllValues()).containsExactly(
-                "La/b/Class;->method(Ljava/lang/Object;)V",
-                "La/b/Class;->method(Ljava/lang/String;)V");
-    }
-
-    @Test
-    public void testOverridePublicMethodWithBridge() throws IOException {
-        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
-                "package a.b;",
-                "public abstract class Base<T> {",
-                "  public void method(T arg) {}",
-                "}"));
-
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class<T extends String> extends Base<T> {",
-                "  @Override",
-                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
-                "  public void method(T arg) {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        // A bridge method is generated for the above, so we expect 2 greylist entries.
-        verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getAllValues()).containsExactly(
-                "La/b/Class;->method(Ljava/lang/Object;)V",
-                "La/b/Class;->method(Ljava/lang/String;)V");
-    }
-
-    @Test
-    public void testBridgeMethodsFromInterface() throws IOException {
-        mJavac.addSource("a.b.Interface", Joiner.on('\n').join(
-                "package a.b;",
-                "public interface Interface {",
-                "  public void method(Object arg);",
-                "}"));
-
-        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "class Base {",
-                "  @Anno(expectedSignature=\"La/b/Base;->method(Ljava/lang/Object;)V\")",
-                "  public void method(Object arg) {}",
-                "}"));
-
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "public class Class extends Base implements Interface {",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Interface"), mStatus, handlerMap)
-                .visit();
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        // A bridge method is generated for the above, so we expect 2 greylist entries.
-        verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getAllValues()).containsExactly(
-                "La/b/Class;->method(Ljava/lang/Object;)V",
-                "La/b/Base;->method(Ljava/lang/Object;)V");
-    }
-
-    @Test
-    public void testPublicBridgeExcluded() throws IOException {
-        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
-                "package a.b;",
-                "public abstract class Base<T> {",
-                "  public void method(T arg) {}",
-                "}"));
-
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class<T extends String> extends Base<T> {",
-                "  @Override",
-                "  @Anno",
-                "  public void method(T arg) {}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Set<String> publicApis = Sets.newHashSet(
-                "La/b/Base;->method(Ljava/lang/Object;)V",
-                "La/b/Class;->method(Ljava/lang/Object;)V");
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION,
-                        new GreylistAnnotationHandler(
-                                mStatus,
-                                mConsumer,
-                                publicApis,
-                                emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        // The bridge method generated for the above, is a public API so should be excluded
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
-    }
-
-    @Test
-    public void testVolatileField() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(expectedSignature=\"La/b/Class;->field:I\")",
-                "  public volatile int field;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(
-                        member -> !member.bridge, // exclude bridge methods
-                        emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-        assertNoErrors();
-        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
-        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
-    }
-
-    @Test
-    public void testVolatileFieldWrongSignature() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(expectedSignature=\"La/b/Class;->wrong:I\")",
-                "  public volatile int field;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-        verify(mStatus, times(1)).error(any(), any());
-    }
-
-    @Test
-    public void testMethodMaxTargetSdk() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(maxTargetSdk=1)",
-                "  public int field;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(
-                        x -> true,
-                        ImmutableSet.of(1)));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-        assertNoErrors();
-        ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
-        verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
-        assertThat(maxTargetSdk.getValue()).isEqualTo(1);
-    }
-
-    @Test
-    public void testMethodNoMaxTargetSdk() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno",
-                "  public int field;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(
-                        x -> true,
-                        ImmutableSet.of(1)));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-        assertNoErrors();
-        ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
-        verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
-        assertThat(maxTargetSdk.getValue()).isEqualTo(null);
-    }
-
-    @Test
-    public void testMethodMaxTargetSdkOutOfRange() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Anno;",
-                "public class Class {",
-                "  @Anno(maxTargetSdk=2)",
-                "  public int field;",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of(ANNOTATION, createGreylistHandler(
-                        x -> true,
-                        ImmutableSet.of(1)));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-        verify(mStatus, times(1)).error(any(), any());
-    }
-
-}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java
new file mode 100644
index 0000000..f2f70ee
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.junit.Before;
+import org.junit.Test;
+
+public class RepeatedAnnotationHandlerTest extends AnnotationHandlerTestBase {
+
+    @Before
+    public void setup() {
+        // To keep the test simpler and more concise, we don't use a real annotation here, but use
+        // our own @Annotation and @Annotation.Multi that have the same relationship.
+        mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
+                "package annotation;",
+                "import static java.lang.annotation.RetentionPolicy.CLASS;",
+                "import java.lang.annotation.Repeatable;",
+                "import java.lang.annotation.Retention;",
+                "@Repeatable(Annotation.Multi.class)",
+                "@Retention(CLASS)",
+                "public @interface Annotation {",
+                "  Class<?> clazz();",
+                "  @Retention(CLASS)",
+                "  @interface Multi {",
+                "    Annotation[] value();",
+                "  }",
+                "}"));
+    }
+
+    @Test
+    public void testRepeated() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Annotation;",
+                "public class Class {",
+                "  @Annotation(clazz=Integer.class)",
+                "  @Annotation(clazz=Long.class)",
+                "  public String method() {return null;}",
+                "}"));
+        mJavac.compile();
+
+        TestAnnotationHandler handler = new TestAnnotationHandler();
+        Map<String, AnnotationHandler> handlerMap =
+            ImmutableMap.of("Lannotation/Annotation$Multi;",
+                new RepeatedAnnotationHandler("Lannotation/Annotation;", handler));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        assertThat(handler.getClasses()).containsExactly(
+                "Ljava/lang/Integer;",
+                "Ljava/lang/Long;");
+    }
+
+    private static class TestAnnotationHandler extends AnnotationHandler {
+
+        private final List<String> classes;
+
+        private TestAnnotationHandler() {
+            this.classes = new ArrayList<>();
+        }
+
+        @Override
+        void handleAnnotation(AnnotationEntry annotation,
+            AnnotationContext context) {
+            classes.add(annotation.getElementValuePairs()[0].getValue().stringifyValue());
+        }
+
+        private List<String> getClasses() {
+            return classes;
+        }
+    }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
new file mode 100644
index 0000000..a6d6a16
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import static java.util.Collections.emptyMap;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Predicate;
+
+public class UnsupportedAppUsageAnnotationHandlerTest extends AnnotationHandlerTestBase {
+
+    private static final String ANNOTATION = "Lannotation/Anno;";
+
+    private static final Map<Integer, String> NULL_SDK_MAP;
+    static {
+        Map<Integer, String> map = new HashMap<>();
+        map.put(null, "flag-null");
+        NULL_SDK_MAP = Collections.unmodifiableMap(map);
+    }
+
+    @Before
+    public void setup() throws IOException {
+        mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
+                "package annotation;",
+                "import static java.lang.annotation.RetentionPolicy.CLASS;",
+                "import java.lang.annotation.Retention;",
+                "import java.lang.annotation.Repeatable;",
+                "@Retention(CLASS)",
+                "@Repeatable(Anno.Container.class)",
+                "public @interface Anno {",
+                "  String expectedSignature() default \"\";",
+                "  int maxTargetSdk() default Integer.MAX_VALUE;",
+                "  String implicitMember() default \"\";",
+                "  @Retention(CLASS)",
+                "  public @interface Container {",
+                "    Anno[] value();",
+                "  }",
+                "}"));
+    }
+
+    private UnsupportedAppUsageAnnotationHandler createGreylistHandler(
+            Predicate<UnsupportedAppUsageAnnotationHandler.ClassMember> greylistFilter,
+            Map<Integer, String> validMaxTargetSdkValues) {
+        return new UnsupportedAppUsageAnnotationHandler(
+                mStatus, mConsumer, greylistFilter, validMaxTargetSdkValues);
+    }
+
+    @Test
+    public void testGreylistMethod() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno",
+                "  public void method() {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
+    }
+
+    @Test
+    public void testGreylistConstructor() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno",
+                "  public Class() {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
+    }
+
+    @Test
+    public void testGreylistField() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno",
+                "  public int i;",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
+    }
+
+    @Test
+    public void testGreylistImplicit() throws IOException {
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "@Anno(implicitMember=\"values()[La/b/EnumClass;\")",
+            "public enum EnumClass {",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/EnumClass;->values()[La/b/EnumClass;");
+    }
+
+    @Test
+    public void testGreylistImplicit_Invalid_MissingOnClass() throws IOException {
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "@Anno",
+            "public enum EnumClass {",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        ArgumentCaptor<String> format = ArgumentCaptor.forClass(String.class);
+        verify(mStatus, times(1)).error(format.capture(), any());
+        // Ensure that the correct error is reported.
+        assertThat(format.getValue())
+            .contains("Missing property implicitMember on annotation on class");
+    }
+
+    @Test
+    public void testGreylistImplicit_Invalid_PresentOnMember() throws IOException {
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "public enum EnumClass {",
+            "  @Anno(implicitMember=\"values()[La/b/EnumClass;\")",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        ArgumentCaptor<String> format = ArgumentCaptor.forClass(String.class);
+        verify(mStatus, times(1)).error(format.capture(), any());
+        assertThat(format.getValue())
+            .contains("Expected annotation with an implicitMember property to be on a class");
+    }
+
+    @Test
+    public void testGreylistMethodExpectedSignature() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(expectedSignature=\"La/b/Class;->method()V\")",
+                "  public void method() {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
+    }
+
+    @Test
+    public void testGreylistMethodExpectedSignatureWrong() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(expectedSignature=\"La/b/Class;->nomethod()V\")",
+                "  public void method() {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        verify(mStatus, times(1)).error(any(), any());
+    }
+
+    @Test
+    public void testGreylistInnerClassMethod() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  public class Inner {",
+                "    @Anno",
+                "    public void method() {}",
+                "  }",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
+    }
+
+    @Test
+    public void testMethodNotGreylisted() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "public class Class {",
+                "  public void method() {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        verify(mConsumer, never()).consume(any(String.class), any(), any());
+    }
+
+    @Test
+    public void testMethodArgGenerics() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> {",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testOverrideMethodWithBridge() throws IOException {
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "abstract class Base<T> {",
+                "  protected abstract void method(T arg);",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> extends Base<T> {",
+                "  @Override",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated for the above, so we expect 2 greylist entries.
+        verify(mConsumer, times(2)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testOverridePublicMethodWithBridge() throws IOException {
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "public abstract class Base<T> {",
+                "  public void method(T arg) {}",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> extends Base<T> {",
+                "  @Override",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated for the above, so we expect 2 greylist entries.
+        verify(mConsumer, times(2)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testBridgeMethodsFromInterface() throws IOException {
+        mJavac.addSource("a.b.Interface", Joiner.on('\n').join(
+                "package a.b;",
+                "public interface Interface {",
+                "  public void method(Object arg);",
+                "}"));
+
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "class Base {",
+                "  @Anno(expectedSignature=\"La/b/Base;->method(Ljava/lang/Object;)V\")",
+                "  public void method(Object arg) {}",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "public class Class extends Base implements Interface {",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Interface"), mStatus, handlerMap)
+                .visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated for the above, so we expect 2 greylist entries.
+        verify(mConsumer, times(2)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Base;->method(Ljava/lang/Object;)V");
+    }
+
+    @Test
+    public void testPublicBridgeExcluded() throws IOException {
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "public abstract class Base<T> {",
+                "  public void method(T arg) {}",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> extends Base<T> {",
+                "  @Override",
+                "  @Anno",
+                "  public void method(T arg) {}",
+                "}"));
+        mJavac.compile();
+
+        Set<String> publicApis = Sets.newHashSet(
+                "La/b/Base;->method(Ljava/lang/Object;)V",
+                "La/b/Class;->method(Ljava/lang/Object;)V");
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION,
+                        new UnsupportedAppUsageAnnotationHandler(
+                                mStatus,
+                                mConsumer,
+                                publicApis,
+                                NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+                // The bridge method generated for the above is a public API, so it should be excluded.
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testVolatileField() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(expectedSignature=\"La/b/Class;->field:I\")",
+                "  public volatile int field;",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(
+                        member -> !member.isBridgeMethod, // exclude bridge methods
+                        NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
+    }
+
+    @Test
+    public void testVolatileFieldWrongSignature() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(expectedSignature=\"La/b/Class;->wrong:I\")",
+                "  public volatile int field;",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+        verify(mStatus, times(1)).error(any(), any());
+    }
+
+    @Test
+    public void testMethodMaxTargetSdk() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(maxTargetSdk=1)",
+                "  public int field;",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(
+                        x -> true,
+                        ImmutableMap.of(1, "flag1")));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+        assertNoErrors();
+        verify(mConsumer, times(1)).consume(any(), any(), eq(ImmutableSet.of("flag1")));
+    }
+
+    @Test
+    public void testMethodNoMaxTargetSdk() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno",
+                "  public int field;",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(
+                        x -> true,
+                        NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+        assertNoErrors();
+        verify(mConsumer, times(1)).consume(any(), any(), eq(ImmutableSet.of("flag-null")));
+    }
+
+    @Test
+    public void testMethodMaxTargetSdkOutOfRange() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class {",
+                "  @Anno(maxTargetSdk=2)",
+                "  public int field;",
+                "}"));
+        mJavac.compile();
+
+        Map<String, AnnotationHandler> handlerMap =
+                ImmutableMap.of(ANNOTATION, createGreylistHandler(
+                        x -> true,
+                        NULL_SDK_MAP));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+        verify(mStatus, times(1)).error(any(), any());
+    }
+
+    @Test
+    public void testAnnotationPropertiesIntoMap() throws IOException {
+        mJavac.addSource("annotation.Anno2", Joiner.on('\n').join(
+                "package annotation;",
+                "import static java.lang.annotation.RetentionPolicy.CLASS;",
+                "import java.lang.annotation.Retention;",
+                "@Retention(CLASS)",
+                "public @interface Anno2 {",
+                "  String expectedSignature() default \"\";",
+                "  int maxTargetSdk() default Integer.MAX_VALUE;",
+                "  long trackingBug() default 0;",
+                "}"));
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno2;",
+                "public class Class {",
+                "  @Anno2(maxTargetSdk=2, trackingBug=123456789)",
+                "  public int field;",
+                "}"));
+        mJavac.compile();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+                ImmutableMap.of("Lannotation/Anno2;", createGreylistHandler(x -> true,
+                        ImmutableMap.of(2, "flag2")))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<Map<String, String>> properties = ArgumentCaptor.forClass(Map.class);
+        verify(mConsumer, times(1)).consume(any(), properties.capture(), any());
+        assertThat(properties.getValue()).containsExactly(
+                "maxTargetSdk", "2",
+                "trackingBug", "123456789");
+    }
+
+}
diff --git a/tools/class2greylist/test/src/com/android/javac/Javac.java b/tools/class2greylist/test/src/com/android/javac/Javac.java
index 202f412..94e4e49 100644
--- a/tools/class2greylist/test/src/com/android/javac/Javac.java
+++ b/tools/class2greylist/test/src/com/android/javac/Javac.java
@@ -18,6 +18,7 @@
 
 import com.google.common.io.Files;
 
+import java.util.stream.Collectors;
 import org.apache.bcel.classfile.ClassParser;
 import org.apache.bcel.classfile.JavaClass;
 
@@ -76,15 +77,24 @@
         return this;
     }
 
-    public boolean compile() {
+    public void compile() {
+        DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
         JavaCompiler.CompilationTask task = mJavac.getTask(
                 null,
                 mFileMan,
-                null,
+                diagnosticCollector,
                 null,
                 null,
                 mCompilationUnits);
-        return task.call();
+        boolean result = task.call();
+        if (!result) {
+            throw new IllegalStateException(
+                "Compilation failed:" +
+                    diagnosticCollector.getDiagnostics()
+                        .stream()
+                        .map(Object::toString)
+                        .collect(Collectors.joining("\n")));
+        }
     }
 
     public InputStream getClassFile(String classname) throws IOException {
diff --git a/tools/common/common.py b/tools/common/common.py
index b728e8d..6206dfb 100755
--- a/tools/common/common.py
+++ b/tools/common/common.py
@@ -299,11 +299,13 @@
       os.mkdir(arch_cache_path)
     lib = 'lib64' if x64 else 'lib'
     android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
+    android_runtime_root = android_root + '/com.android.runtime'
     library_path = android_root + '/' + lib
     path = android_root + '/bin'
     self._shell_env = os.environ.copy()
     self._shell_env['ANDROID_DATA'] = self._env_path
     self._shell_env['ANDROID_ROOT'] = android_root
+    self._shell_env['ANDROID_RUNTIME_ROOT'] = android_runtime_root
     self._shell_env['LD_LIBRARY_PATH'] = library_path
     self._shell_env['DYLD_LIBRARY_PATH'] = library_path
     self._shell_env['PATH'] = (path + ':' + self._shell_env['PATH'])
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index 23cc917..027f128 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -14,16 +14,11 @@
 // limitations under the License.
 //
 
-// Build a "data" binary which will hold all the symbol values that will be parsed by the other scripts.
-//
-// Builds are for host only, target-specific define generation is possibly but is trickier and would need extra tooling.
-//
-// In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
-
-cc_binary { // Do not use art_cc_binary because HOST_PREFER_32_BIT is incompatible with genrule.
-    name: "cpp-define-generator-data",
+// This produces human-readable asm_defines.s with the embedded compile-time constants.
+cc_object {
+    name: "asm_defines.s",
     host_supported: true,
-    device_supported: false,
+    device_supported: true,
     defaults: [
         "art_debug_defaults",
         "art_defaults",
@@ -33,20 +28,36 @@
         "art/libdexfile",
         "art/libartbase",
         "art/runtime",
+        "system/core/base/include",
     ],
-    srcs: ["main.cc"],
-    shared_libs: [
-        "libbase",
-    ],
+    // Produce text file rather than binary.
+    cflags: ["-S"],
+    srcs: ["asm_defines.cc"],
 }
 
-// Note: See $OUT_DIR/soong/build.ninja
-// For the exact filename that this generates to run make command on just
-// this rule later.
-genrule {
+// This extracts the compile-time constants from asm_defines.s and creates the header.
+cc_genrule {
     name: "cpp-define-generator-asm-support",
-    out: ["asm_support_gen.h"],
-    tools: ["cpp-define-generator-data"],
-    tool_files: ["verify-asm-support"],
-    cmd: "$(location verify-asm-support) --quiet \"$(location cpp-define-generator-data)\" \"$(out)\"",
+    host_supported: true,
+    device_supported: true,
+    srcs: [":asm_defines.s"],
+    out: ["asm_defines.h"],
+    tool_files: ["make_header.py"],
+    cmd: "$(location make_header.py) \"$(in)\" > \"$(out)\"",
+}
+
+cc_library_headers {
+    name: "cpp-define-generator-definitions",
+    host_supported: true,
+    export_include_dirs: ["."],
+}
+
+python_binary_host {
+    name: "cpp-define-generator-test",
+    main: "make_header_test.py",
+    srcs: [
+        "make_header.py",
+        "make_header_test.py",
+    ],
+    test_suites: ["general-tests"],
 }
diff --git a/tools/cpp-define-generator/art_method.def b/tools/cpp-define-generator/art_method.def
new file mode 100644
index 0000000..21859dc
--- /dev/null
+++ b/tools/cpp-define-generator/art_method.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "art_method.h"
+#endif
+
+ASM_DEFINE(ART_METHOD_ACCESS_FLAGS_OFFSET,
+           art::ArtMethod::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_DECLARING_CLASS_OFFSET,
+           art::ArtMethod::DeclaringClassOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_JNI_OFFSET_32,
+           art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(ART_METHOD_JNI_OFFSET_64,
+           art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_32,
+           art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_64,
+           art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
diff --git a/tools/cpp-define-generator/asm_defines.cc b/tools/cpp-define-generator/asm_defines.cc
new file mode 100644
index 0000000..b79e1ae
--- /dev/null
+++ b/tools/cpp-define-generator/asm_defines.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// This file is used to generate #defines for use in assembly source code.
+//
+// The content of this file will be used to compile an object file
+// (generated as human readable assembly text file, not as binary).
+// This text file will then be post-processed by a python script to find
+// and extract the constants and generate the final asm_defines.h header.
+//
+
+// We use "asm volatile" to generate text that will stand out in the
+// compiler generated intermediate assembly file (eg. ">>FOO 42 0<<").
+// We emit all values as 64-bit integers (which will be printed as text).
+// We also store a flag which specifies whether the constant is negative.
+// Note that "asm volatile" must be inside a method to please the compiler.
+#define ASM_DEFINE(NAME, EXPR) \
+void AsmDefineHelperFor_##NAME() { \
+  asm volatile("\n.ascii \">>" #NAME " %0 %1<<\"" \
+  :: "i" (static_cast<int64_t>(EXPR)), "i" ((EXPR) < 0 ? 1 : 0)); \
+}
+#include "asm_defines.def"
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
new file mode 100644
index 0000000..7a77e8e
--- /dev/null
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if !defined(ASM_DEFINE_INCLUDE_DEPENDENCIES)
+#define ASM_DEFINE_INCLUDE_DEPENDENCIES 1
+#endif
+
+#include "globals.def"
+#include "art_method.def"
+#include "lockword.def"
+#include "mirror_array.def"
+#include "mirror_class.def"
+#include "mirror_dex_cache.def"
+#include "mirror_object.def"
+#include "mirror_string.def"
+#include "rosalloc.def"
+#include "runtime.def"
+#include "shadow_frame.def"
+#include "thread.def"
diff --git a/tools/cpp-define-generator/common.def b/tools/cpp-define-generator/common.def
deleted file mode 100644
index 76c64c9..0000000
--- a/tools/cpp-define-generator/common.def
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Convenience macro to define an offset expression.
-
-#ifndef DEFINE_OFFSET_EXPR
-#define DEFINE_OFFSET_EXPR(holder_type, field_name, field_type, expr) \
-  DEFINE_EXPR(holder_type ## _ ## field_name ## _OFFSET, field_type, expr)
-#define DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#endif
-
diff --git a/tools/cpp-define-generator/common_undef.def b/tools/cpp-define-generator/common_undef.def
deleted file mode 100644
index c44aba7..0000000
--- a/tools/cpp-define-generator/common_undef.def
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifdef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#undef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#undef DEFINE_OFFSET_EXPR
-#endif
diff --git a/tools/cpp-define-generator/constant_card_table.def b/tools/cpp-define-generator/constant_card_table.def
deleted file mode 100644
index ae3e8f3..0000000
--- a/tools/cpp-define-generator/constant_card_table.def
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export heap values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/accounting/card_table.h"
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(CARD_TABLE_CARD_SHIFT, size_t, art::gc::accounting::CardTable::kCardShift)
-
diff --git a/tools/cpp-define-generator/constant_class.def b/tools/cpp-define-generator/constant_class.def
deleted file mode 100644
index 1310103..0000000
--- a/tools/cpp-define-generator/constant_class.def
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/bit_utils.h"       // MostSignificantBit
-#include "dex/modifiers.h"        // kAccClassIsFinalizable
-#endif
-
-#define DEFINE_FLAG_OFFSET(type_name, field_name, expr) \
-  DEFINE_EXPR(type_name ## _ ## field_name, uint32_t, (expr))
-
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE,     art::kAccClassIsFinalizable)
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_INTERFACE,       art::kAccInterface)
-// TODO: We should really have a BitPosition which also checks it's a power of 2.
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE_BIT, art::MostSignificantBit(art::kAccClassIsFinalizable))
-
-#undef DEFINE_FLAG_OFFSET
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
deleted file mode 100644
index 743ebb7..0000000
--- a/tools/cpp-define-generator/constant_dexcache.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/dex_cache.h"   // art::mirror::DexCache, StringDexCachePair
-#endif
-
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT,       int32_t,
-    art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
-DEFINE_EXPR(STRING_DEX_CACHE_SIZE_MINUS_ONE,           int32_t,
-    art::mirror::DexCache::kDexCacheStringCacheSize - 1)
-DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS,                int32_t,
-    art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE,             int32_t,
-    sizeof(art::mirror::StringDexCachePair))
-DEFINE_EXPR(METHOD_DEX_CACHE_SIZE_MINUS_ONE,           int32_t,
-    art::mirror::DexCache::kDexCacheMethodCacheSize - 1)
-DEFINE_EXPR(METHOD_DEX_CACHE_HASH_BITS,                int32_t,
-    art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))
diff --git a/tools/cpp-define-generator/constant_globals.def b/tools/cpp-define-generator/constant_globals.def
deleted file mode 100644
index d0d6350..0000000
--- a/tools/cpp-define-generator/constant_globals.def
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export global values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include <atomic>            // std::memory_order_relaxed
-#include "base/globals.h"    // art::kObjectAlignment
-#include "dex/modifiers.h"
-#endif
-
-DEFINE_EXPR(STD_MEMORY_ORDER_RELAXED, int32_t, std::memory_order_relaxed)
-
-#define DEFINE_OBJECT_EXPR(macro_name, type, constant_field_name) \
-  DEFINE_EXPR(OBJECT_ ## macro_name, type, constant_field_name)
-
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK,         size_t,   art::kObjectAlignment - 1)
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED, uint32_t, ~static_cast<uint32_t>(art::kObjectAlignment - 1))
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED64, uint64_t, ~static_cast<uint64_t>(art::kObjectAlignment - 1))
-
-DEFINE_EXPR(ACC_OBSOLETE_METHOD,           int32_t,  art::kAccObsoleteMethod)
-DEFINE_EXPR(ACC_OBSOLETE_METHOD_SHIFT,     int32_t,  art::WhichPowerOf2(art::kAccObsoleteMethod))
-
-#undef DEFINE_OBJECT_EXPR
-
diff --git a/tools/cpp-define-generator/constant_heap.def b/tools/cpp-define-generator/constant_heap.def
deleted file mode 100644
index dc76736..0000000
--- a/tools/cpp-define-generator/constant_heap.def
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export heap values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/heap.h"
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(MIN_LARGE_OBJECT_THRESHOLD, size_t, art::gc::Heap::kMinLargeObjectThreshold)
-
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
deleted file mode 100644
index 5fa5194..0000000
--- a/tools/cpp-define-generator/constant_jit.def
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within jit.h.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "jit/jit.h"   // art::kSuspendRequest, etc.
-#endif
-
-#define DEFINE_JIT_CONSTANT(macro_name, type, expr) \
-  DEFINE_EXPR(JIT_ ## macro_name, type, (expr))
-
-DEFINE_JIT_CONSTANT(CHECK_OSR,       int16_t, art::jit::kJitCheckForOSR)
-DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
-
-#undef DEFINE_JIT_CONSTANT
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
deleted file mode 100644
index 977d1ca..0000000
--- a/tools/cpp-define-generator/constant_lockword.def
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export lockword values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "lock_word.h"         // art::LockWord
-#endif
-
-#define DEFINE_LOCK_WORD_EXPR(macro_name, type, constant_field_name) \
-  DEFINE_EXPR(LOCK_WORD_ ## macro_name, type, art::LockWord::constant_field_name)
-
-// FIXME: The naming is inconsistent, the `Shifted` -> `_SHIFTED` suffix is sometimes missing.
-DEFINE_LOCK_WORD_EXPR(STATE_SHIFT,               int32_t,  kStateShift)
-DEFINE_LOCK_WORD_EXPR(STATE_MASK_SHIFTED,        uint32_t, kStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_SHIFT,  int32_t,  kReadBarrierStateShift)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK,   uint32_t, kReadBarrierStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SIZE,      int32_t,  kThinLockCountSize)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SHIFT,     int32_t,  kThinLockCountShift)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_MASK_SHIFTED, uint32_t, kThinLockCountMaskShifted)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       uint32_t, kThinLockCountOne)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_OWNER_MASK_SHIFTED, uint32_t, kThinLockOwnerMaskShifted)
-
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS,  uint32_t, kStateForwardingAddress)
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
-
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,     uint32_t,  kGCStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SIZE,             int32_t,  kGCStateSize)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,            int32_t,  kGCStateShift)
-
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT,            int32_t,  kMarkBitStateShift)
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED,     uint32_t, kMarkBitStateMaskShifted)
-
-#undef DEFINE_LOCK_WORD_EXPR
-
diff --git a/tools/cpp-define-generator/constant_reference.def b/tools/cpp-define-generator/constant_reference.def
deleted file mode 100644
index d312f76..0000000
--- a/tools/cpp-define-generator/constant_reference.def
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/object.h"            // mirror::Object
-#include "stack.h"                    // StackReference
-#include "mirror/object_reference.h"  // mirror::CompressedReference
-#include "base/bit_utils.h"           // WhichPowerOf2
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(STACK_REFERENCE_SIZE,            size_t, sizeof(art::StackReference<art::mirror::Object>))
-// Size of heap references
-DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE,       size_t, sizeof(art::mirror::CompressedReference<art::mirror::Object>))
-DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE_SHIFT, size_t, art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))
-
-#undef DEFINE_REFERENCE_OFFSET
diff --git a/tools/cpp-define-generator/constant_rosalloc.def b/tools/cpp-define-generator/constant_rosalloc.def
deleted file mode 100644
index 2007cef..0000000
--- a/tools/cpp-define-generator/constant_rosalloc.def
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within RosAlloc.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/allocator/rosalloc.h"   // art::gc::allocator::RosAlloc
-#endif
-
-#define DEFINE_ROSALLOC_CONSTANT(macro_name, type, expr) \
-  DEFINE_EXPR(ROSALLOC_ ## macro_name, type, (expr))
-
-DEFINE_ROSALLOC_CONSTANT(MAX_THREAD_LOCAL_BRACKET_SIZE, int32_t, art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize)
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_SHIFT,    int32_t, art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift)
-// TODO: This should be a BitUtils helper, e.g. BitMaskFromSize or something like that.
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK,     int32_t, static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED32,\
-                                                        uint32_t, ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED64,\
-                                                        uint64_t, ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_OFFSET,          int32_t, art::gc::allocator::RosAlloc::RunFreeListOffset())
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_HEAD_OFFSET,     int32_t, art::gc::allocator::RosAlloc::RunFreeListHeadOffset())
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_SIZE_OFFSET,     int32_t, art::gc::allocator::RosAlloc::RunFreeListSizeOffset())
-DEFINE_ROSALLOC_CONSTANT(SLOT_NEXT_OFFSET,              int32_t, art::gc::allocator::RosAlloc::RunSlotNextOffset())
-
-
-#undef DEFINE_ROSALLOC_CONSTANT
diff --git a/tools/cpp-define-generator/constant_thread.def b/tools/cpp-define-generator/constant_thread.def
deleted file mode 100644
index 1364b55..0000000
--- a/tools/cpp-define-generator/constant_thread.def
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within thread.h.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "thread.h"   // art::kSuspendRequest, etc.
-#endif
-
-#define DEFINE_THREAD_CONSTANT(macro_name, type, expr) \
-  DEFINE_EXPR(THREAD_ ## macro_name, type, (expr))
-
-DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST,    int32_t, art::kSuspendRequest)
-DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
-DEFINE_THREAD_CONSTANT(EMPTY_CHECKPOINT_REQUEST, int32_t, art::kEmptyCheckpointRequest)
-DEFINE_THREAD_CONSTANT(SUSPEND_OR_CHECKPOINT_REQUEST,  int32_t, art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
-
-#undef DEFINE_THREAD_CONSTANT
diff --git a/tools/cpp-define-generator/generate-asm-support b/tools/cpp-define-generator/generate-asm-support
deleted file mode 100755
index fcdf72f..0000000
--- a/tools/cpp-define-generator/generate-asm-support
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Generates asm_support_gen.h
-# - This must be run after a build since it uses cpp-define-generator-data
-
-[[ -z ${ANDROID_BUILD_TOP+x} ]] && (echo "Run source build/envsetup.sh first" >&2 && exit 1)
-
-cpp-define-generator-data > ${ANDROID_BUILD_TOP}/art/runtime/generated/asm_support_gen.h
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
new file mode 100644
index 0000000..10542622
--- /dev/null
+++ b/tools/cpp-define-generator/globals.def
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "dex/modifiers.h"
+#include "gc/accounting/card_table.h"
+#include "gc/heap.h"
+#include "interpreter/mterp/mterp.h"
+#include "jit/jit.h"
+#include "mirror/object.h"
+#include "mirror/object_reference.h"
+#include "runtime_globals.h"
+#include "stack.h"
+#endif
+
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE,
+           art::kAccClassIsFinalizable)
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT,
+           art::MostSignificantBit(art::kAccClassIsFinalizable))
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_INTERFACE,
+           art::kAccInterface)
+ASM_DEFINE(ACC_OBSOLETE_METHOD,
+           art::kAccObsoleteMethod)
+ASM_DEFINE(ACC_OBSOLETE_METHOD_SHIFT,
+           art::WhichPowerOf2(art::kAccObsoleteMethod))
+ASM_DEFINE(CARD_TABLE_CARD_SHIFT,
+           art::gc::accounting::CardTable::kCardShift)
+ASM_DEFINE(COMPRESSED_REFERENCE_SIZE,
+           sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+ASM_DEFINE(COMPRESSED_REFERENCE_SIZE_SHIFT,
+           art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))
+ASM_DEFINE(JIT_CHECK_OSR,
+           art::jit::kJitCheckForOSR)
+ASM_DEFINE(JIT_HOTNESS_DISABLE,
+           art::jit::kJitHotnessDisabled)
+ASM_DEFINE(MIN_LARGE_OBJECT_THRESHOLD,
+           art::gc::Heap::kMinLargeObjectThreshold)
+ASM_DEFINE(MTERP_HANDLER_SIZE,
+           art::interpreter::kMterpHandlerSize)
+ASM_DEFINE(MTERP_HANDLER_SIZE_LOG2,
+           art::WhichPowerOf2(art::interpreter::kMterpHandlerSize))
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK,
+           art::kObjectAlignment - 1)
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED,
+           ~static_cast<uint32_t>(art::kObjectAlignment - 1))
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED64,
+           ~static_cast<uint64_t>(art::kObjectAlignment - 1))
+ASM_DEFINE(POINTER_SIZE,
+           static_cast<size_t>(art::kRuntimePointerSize))
+ASM_DEFINE(POINTER_SIZE_SHIFT,
+           art::WhichPowerOf2(static_cast<size_t>(art::kRuntimePointerSize)))
+ASM_DEFINE(STACK_REFERENCE_SIZE,
+           sizeof(art::StackReference<art::mirror::Object>))
+ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
+           std::memory_order_relaxed)
diff --git a/tools/cpp-define-generator/lockword.def b/tools/cpp-define-generator/lockword.def
new file mode 100644
index 0000000..a170c15
--- /dev/null
+++ b/tools/cpp-define-generator/lockword.def
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "lock_word.h"
+#endif
+
+ASM_DEFINE(LOCK_WORD_GC_STATE_MASK_SHIFTED,
+           art::LockWord::kGCStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED,
+           art::LockWord::kGCStateMaskShiftedToggled)
+ASM_DEFINE(LOCK_WORD_GC_STATE_SHIFT,
+           art::LockWord::kGCStateShift)
+ASM_DEFINE(LOCK_WORD_GC_STATE_SIZE,
+           art::LockWord::kGCStateSize)
+ASM_DEFINE(LOCK_WORD_MARK_BIT_MASK_SHIFTED,
+           art::LockWord::kMarkBitStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_MARK_BIT_SHIFT,
+           art::LockWord::kMarkBitStateShift)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK,
+           art::LockWord::kReadBarrierStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED,
+           art::LockWord::kReadBarrierStateMaskShiftedToggled)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_SHIFT,
+           art::LockWord::kReadBarrierStateShift)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS,
+           art::LockWord::kStateForwardingAddress)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW,
+           art::LockWord::kStateForwardingAddressOverflow)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT,
+           art::LockWord::kForwardingAddressShift)
+ASM_DEFINE(LOCK_WORD_STATE_MASK_SHIFTED,
+           art::LockWord::kStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_STATE_SHIFT,
+           art::LockWord::kStateShift)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED,
+           art::LockWord::kThinLockCountMaskShifted)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_ONE,
+           art::LockWord::kThinLockCountOne)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_SHIFT,
+           art::LockWord::kThinLockCountShift)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_SIZE,
+           art::LockWord::kThinLockCountSize)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED,
+           art::LockWord::kThinLockOwnerMaskShifted)
diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc
deleted file mode 100644
index 7c515be..0000000
--- a/tools/cpp-define-generator/main.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <ios>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <type_traits>
-
-// Art Offset file dependencies
-#define DEFINE_INCLUDE_DEPENDENCIES
-#include "offsets_all.def"
-
-std::string to_upper(std::string input) {
-  std::transform(input.begin(), input.end(), input.begin(), ::toupper);
-  return input;
-}
-
-template <typename T, typename = void>
-typename std::enable_if<!std::is_signed<T>::value, std::string>::type
-pretty_format(T value) {
-  // Print most values as hex.
-  std::stringstream ss;
-  ss << std::showbase << std::hex << value;
-  return ss.str();
-}
-
-template <typename T, typename = void>
-typename std::enable_if<std::is_signed<T>::value, std::string>::type
-pretty_format(T value) {
-  // Print "signed" values as decimal so that the negativity doesn't get lost.
-  std::stringstream ss;
-
-  // For negative values add a (). Omit it from positive values for conciseness.
-  if (value < 0) {
-    ss << "(";
-  }
-
-  ss << value;
-
-  if (value < 0) {
-    ss << ")";
-  }
-  return ss.str();
-}
-
-template <typename T>
-void cpp_define(const std::string& name, T value) {
-  std::cout << "#define " << name << " " << pretty_format(value) << std::endl;
-}
-
-template <typename T>
-void emit_check_eq(T value, const std::string& expr) {
-  std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl;
-}
-
-const char *kFileHeader = /* // NOLINT [readability/multiline_string] [5] */ R"L1C3NS3(
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
-// This file has been auto-generated by cpp-define-generator; do not edit directly.
-)L1C3NS3";  // NOLINT [readability/multiline_string] [5]
-
-const char *kFileFooter = /* // NOLINT [readability/multiline_string] [5] */ R"F00T3R(
-#endif  // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-)F00T3R";  // NOLINT [readability/multiline_string] [5]
-
-#define MACROIZE(holder_type, field_name) to_upper(#holder_type "_" #field_name "_OFFSET")
-
-int main() {
-  std::cout << kFileHeader << std::endl;
-
-  std::string z = "";
-
-  // Print every constant expression to stdout as a #define or a CHECK_EQ
-#define DEFINE_EXPR(macro_name, field_type, expr) \
-  cpp_define(to_upper(#macro_name), static_cast<field_type>(expr)); \
-  emit_check_eq(z + "static_cast<" #field_type ">(" + to_upper(#macro_name) + ")", \
-                "static_cast<" #field_type ">(" #expr ")");
-#include "offsets_all.def"
-
-  std::cout << kFileFooter << std::endl;
-  return 0;
-}
diff --git a/tools/cpp-define-generator/make_header.py b/tools/cpp-define-generator/make_header.py
new file mode 100755
index 0000000..1b13923
--- /dev/null
+++ b/tools/cpp-define-generator/make_header.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script looks through a compiled object file (stored as human-readable text),
+# and looks for compile-time constants (added through a custom "asm" block).
+#   For example:  .ascii  ">>OBJECT_ALIGNMENT_MASK $7 $0<<"
+#
+# It will transform each such line to a #define which is usable in assembly code.
+#   For example:  #define OBJECT_ALIGNMENT_MASK 0x7
+#
+# Usage: make_header.py out/soong/.intermediates/.../asm_defines.o
+#
+
+import argparse
+import re
+import sys
+
+def convert(input):
+  """Find all defines in the compiler generated assembly and convert them to #define pragmas"""
+
+  asm_define_re = re.compile(r'">>(\w+) (?:\$|#)([-0-9]+) (?:\$|#)(0|1)<<"')
+  asm_defines = asm_define_re.findall(input)
+  if not asm_defines:
+    raise RuntimeError("Failed to find any asm defines in the input")
+
+  # Convert the found constants to #define pragmas.
+  # In case the C++ compiler decides to reorder the AsmDefinesFor_${name} functions,
+  # we don't want the order of the .h file to change from one compilation to another.
+  # Sorting ensures deterministic order of the #defines.
+  output = []
+  for name, value, negative_value in sorted(asm_defines):
+    value = int(value)
+    if value < 0 and negative_value == "0":
+      # Overflow - uint64_t constant was pretty printed as negative value.
+      value += 2 ** 64  # Python will use arbitrary precision arithmetic.
+    output.append("#define {0} {1:#x}".format(name, value))
+  return "\n".join(output)
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument('input', help="Object file as text")
+  args = parser.parse_args()
+  print(convert(open(args.input, "r").read()))
diff --git a/tools/cpp-define-generator/make_header_test.py b/tools/cpp-define-generator/make_header_test.py
new file mode 100755
index 0000000..a484285
--- /dev/null
+++ b/tools/cpp-define-generator/make_header_test.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import make_header
+
+test_input = r'''
+// Check that the various other assembly lines are ignored.
+.globl  _Z49AsmDefineHelperFor_MIRROR_OBJECT_LOCK_WORD_OFFSETv
+.type   _Z49AsmDefineHelperFor_MIRROR_OBJECT_LOCK_WORD_OFFSETv,%function
+.ascii  ">>MIRROR_OBJECT_LOCK_WORD_OFFSET #4 #0<<"
+bx      lr
+
+// Check large positive 32-bit constant.
+.ascii  ">>OBJECT_ALIGNMENT_MASK_TOGGLED #4294967288 #0<<"
+
+// Check large positive 64-bit constant (it overflows into negative value).
+.ascii  ">>OBJECT_ALIGNMENT_MASK_TOGGLED64 #-8 #0<<"
+
+// Check negative constant.
+.ascii  ">>JIT_CHECK_OSR #-1 #1<<"
+'''
+
+test_output = r'''
+#define JIT_CHECK_OSR -0x1
+#define MIRROR_OBJECT_LOCK_WORD_OFFSET 0x4
+#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
+#define OBJECT_ALIGNMENT_MASK_TOGGLED64 0xfffffffffffffff8
+'''
+
+class CppDefineGeneratorTest(unittest.TestCase):
+  def test_convert(self):
+    self.assertEqual(test_output.strip(), make_header.convert(test_input))
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/tools/cpp-define-generator/mirror_array.def b/tools/cpp-define-generator/mirror_array.def
new file mode 100644
index 0000000..f600b41
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_array.def
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/array.h"
+#endif
+
+ASM_DEFINE(MIRROR_ARRAY_LENGTH_OFFSET,
+           art::mirror::Array::LengthOffset().Int32Value())
+ASM_DEFINE(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
+ASM_DEFINE(MIRROR_BYTE_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
+ASM_DEFINE(MIRROR_CHAR_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
+ASM_DEFINE(MIRROR_INT_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
+ASM_DEFINE(MIRROR_LONG_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
+ASM_DEFINE(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE,
+           sizeof(art::mirror::HeapReference<art::mirror::Object>))
+ASM_DEFINE(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
+ASM_DEFINE(MIRROR_SHORT_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
+ASM_DEFINE(MIRROR_WIDE_ARRAY_DATA_OFFSET,
+           art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
diff --git a/tools/cpp-define-generator/mirror_class.def b/tools/cpp-define-generator/mirror_class.def
new file mode 100644
index 0000000..c15ae92
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_class.def
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/class.h"
+#endif
+
+ASM_DEFINE(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
+           art::mirror::Class::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
+           art::mirror::Class::ComponentTypeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_DEX_CACHE_OFFSET,
+           art::mirror::Class::DexCacheOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IF_TABLE_OFFSET,
+           art::mirror::Class::IfTableOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
+           art::mirror::Class::PrimitiveTypeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
+           art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
+           art::mirror::Class::ObjectSizeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_STATUS_OFFSET,
+           art::mirror::Class::StatusOffset().Int32Value())
+ASM_DEFINE(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
+           art::mirror::Class::kPrimitiveTypeSizeShiftShift)
diff --git a/tools/cpp-define-generator/mirror_dex_cache.def b/tools/cpp-define-generator/mirror_dex_cache.def
new file mode 100644
index 0000000..5272e86
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_dex_cache.def
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/dex_cache.h"
+#endif
+
+ASM_DEFINE(METHOD_DEX_CACHE_SIZE_MINUS_ONE,
+           art::mirror::DexCache::kDexCacheMethodCacheSize - 1)
+ASM_DEFINE(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET,
+           art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())
+ASM_DEFINE(STRING_DEX_CACHE_ELEMENT_SIZE,
+           sizeof(art::mirror::StringDexCachePair))
+ASM_DEFINE(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT,
+           art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
+ASM_DEFINE(STRING_DEX_CACHE_HASH_BITS,
+           art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
+ASM_DEFINE(STRING_DEX_CACHE_SIZE_MINUS_ONE,
+           art::mirror::DexCache::kDexCacheStringCacheSize - 1)
+ASM_DEFINE(METHOD_DEX_CACHE_HASH_BITS,
+           art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))
diff --git a/tools/cpp-define-generator/mirror_object.def b/tools/cpp-define-generator/mirror_object.def
new file mode 100644
index 0000000..facb037
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_object.def
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/object.h"
+#endif
+
+ASM_DEFINE(MIRROR_OBJECT_CLASS_OFFSET,
+           art::mirror::Object::ClassOffset().Int32Value())
+ASM_DEFINE(MIRROR_OBJECT_HEADER_SIZE,
+           sizeof(art::mirror::Object))
+ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET,
+           art::mirror::Object::MonitorOffset().Int32Value())
diff --git a/tools/cpp-define-generator/mirror_string.def b/tools/cpp-define-generator/mirror_string.def
new file mode 100644
index 0000000..3632b96
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_string.def
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/string.h"
+#endif
+
+ASM_DEFINE(MIRROR_STRING_COUNT_OFFSET,
+           art::mirror::String::CountOffset().Int32Value())
+ASM_DEFINE(MIRROR_STRING_VALUE_OFFSET,
+           art::mirror::String::ValueOffset().Int32Value())
+ASM_DEFINE(STRING_COMPRESSION_FEATURE,
+           art::mirror::kUseStringCompression)
diff --git a/tools/cpp-define-generator/offset_art_method.def b/tools/cpp-define-generator/offset_art_method.def
deleted file mode 100644
index e6a0907..0000000
--- a/tools/cpp-define-generator/offset_art_method.def
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within art::ArtMethod.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "art_method.h"         // art::ArtMethod
-#include "base/enums.h"         // PointerSize
-#include "mirror/dex_cache.h"   // art::DexCache
-#endif
-
-#define DEFINE_ART_METHOD_OFFSET_SIZED(field_name, method_name) \
-  DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
-  DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
-
-#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
-  DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET, int32_t, art::ArtMethod::method_name##Offset().Int32Value())
-
-#define DEFINE_DECLARING_CLASS_OFFSET(field_name, method_name) \
-  DEFINE_EXPR(DECLARING_CLASS_ ## field_name ## _OFFSET, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
-
-//                         New macro suffix          Method Name (of the Offset method)
-DEFINE_ART_METHOD_OFFSET_SIZED(JNI,                  EntryPointFromJni)
-DEFINE_ART_METHOD_OFFSET_SIZED(QUICK_CODE,           EntryPointFromQuickCompiledCode)
-DEFINE_ART_METHOD_OFFSET(DECLARING_CLASS,            DeclaringClass)
-DEFINE_ART_METHOD_OFFSET(ACCESS_FLAGS,               AccessFlags)
-
-#undef DEFINE_ART_METHOD_OFFSET
-#undef DEFINE_ART_METHOD_OFFSET_32
-#undef DEFINE_DECLARING_CLASS_OFFSET
diff --git a/tools/cpp-define-generator/offset_mirror_class.def b/tools/cpp-define-generator/offset_mirror_class.def
deleted file mode 100644
index 9b7bfce..0000000
--- a/tools/cpp-define-generator/offset_mirror_class.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.Class (mirror::Class).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/class.h"         // art::mirror::Object
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_CLASS_OFFSET(field_name, method_name) \
-  DEFINE_OFFSET_EXPR(MIRROR_CLASS, field_name, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
-
-//                         New macro suffix             Method Name (of the Offset method)
-DEFINE_MIRROR_CLASS_OFFSET(DEX_CACHE,                   DexCache)
-
-#undef DEFINE_MIRROR_CLASS_OFFSET
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_mirror_dex_cache.def b/tools/cpp-define-generator/offset_mirror_dex_cache.def
deleted file mode 100644
index 8f008bb..0000000
--- a/tools/cpp-define-generator/offset_mirror_dex_cache.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.DexCache (mirror::DexCache).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/class.h"         // art::mirror::Object
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_DEX_CACHE_OFFSET(field_name, method_name) \
-  DEFINE_OFFSET_EXPR(MIRROR_DEX_CACHE, field_name, int32_t, art::mirror::DexCache::method_name##Offset().Int32Value())
-
-//                             New macro suffix         Method Name (of the Offset method)
-DEFINE_MIRROR_DEX_CACHE_OFFSET(RESOLVED_METHODS,        ResolvedMethods)
-
-#undef DEFINE_MIRROR_CLASS_OFFSET
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_mirror_object.def b/tools/cpp-define-generator/offset_mirror_object.def
deleted file mode 100644
index 9b99634..0000000
--- a/tools/cpp-define-generator/offset_mirror_object.def
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.Object (mirror::Object).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/object.h"         // art::mirror::Object
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_OBJECT_OFFSET(field_name, method_name) \
-  DEFINE_OFFSET_EXPR(MIRROR_OBJECT, field_name, int32_t, art::mirror::Object::method_name##Offset().Int32Value())
-
-//                          New macro suffix            Method Name (of the Offset method)
-DEFINE_MIRROR_OBJECT_OFFSET(CLASS,                      Class)
-DEFINE_MIRROR_OBJECT_OFFSET(LOCK_WORD,                  Monitor)
-
-#undef DEFINE_MIRROR_OBJECT_OFFSET
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
deleted file mode 100644
index 1d5ce7d..0000000
--- a/tools/cpp-define-generator/offset_runtime.def
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/callee_save_type.h"  // art::CalleeSaveType
-#include "runtime.h"                // art::Runtime
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-// Note: these callee save methods loads require read barriers.
-
-#define DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(field_name, constant_name) \
-  DEFINE_OFFSET_EXPR(Runtime, \
-                     field_name ## _METHOD, \
-                     size_t, \
-                     art::Runtime::GetCalleeSaveMethodOffset(constant_name))
-
-                    //     Macro substring       Constant name
-// Offset of field Runtime::callee_save_methods_[kSaveAllCalleeSaves]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL_CALLEE_SAVES, art::CalleeSaveType::kSaveAllCalleeSaves)
-// Offset of field Runtime::callee_save_methods_[kSaveRefsOnly]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, art::CalleeSaveType::kSaveRefsOnly)
-// Offset of field Runtime::callee_save_methods_[kSaveRefsAndArgs]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_AND_ARGS, art::CalleeSaveType::kSaveRefsAndArgs)
-// Offset of field Runtime::callee_save_methods_[kSaveEverything]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, art::CalleeSaveType::kSaveEverything)
-// Offset of field Runtime::callee_save_methods_[kSaveEverythingForClinit]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING_FOR_CLINIT, art::CalleeSaveType::kSaveEverythingForClinit)
-// Offset of field Runtime::callee_save_methods_[kSaveEverythingForSuspendCheck]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING_FOR_SUSPEND_CHECK, art::CalleeSaveType::kSaveEverythingForSuspendCheck)
-
-#undef DEFINE_RUNTIME_CALLEE_SAVE_OFFSET
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_shadow_frame.def b/tools/cpp-define-generator/offset_shadow_frame.def
deleted file mode 100644
index b49a340..0000000
--- a/tools/cpp-define-generator/offset_shadow_frame.def
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "stack.h"         // art::ShadowFrame
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-#define DEFINE_SHADOW_FRAME_OFFSET(field_name, method_name) \
-  DEFINE_OFFSET_EXPR(ShadowFrame, field_name, int32_t, art::ShadowFrame::method_name##Offset())
-
-//                         New macro suffix            Method Name (of the Offset method)
-DEFINE_SHADOW_FRAME_OFFSET(LINK,                       Link)
-DEFINE_SHADOW_FRAME_OFFSET(METHOD,                     Method)
-DEFINE_SHADOW_FRAME_OFFSET(RESULT_REGISTER,            ResultRegister)
-DEFINE_SHADOW_FRAME_OFFSET(DEX_PC_PTR,                 DexPCPtr)
-DEFINE_SHADOW_FRAME_OFFSET(CODE_ITEM,                  CodeItem)
-DEFINE_SHADOW_FRAME_OFFSET(LOCK_COUNT_DATA,            LockCountData)
-DEFINE_SHADOW_FRAME_OFFSET(NUMBER_OF_VREGS,            NumberOfVRegs)
-DEFINE_SHADOW_FRAME_OFFSET(DEX_PC,                     DexPC)
-DEFINE_SHADOW_FRAME_OFFSET(CACHED_HOTNESS_COUNTDOWN,   CachedHotnessCountdown)
-DEFINE_SHADOW_FRAME_OFFSET(VREGS,                      VRegs)
-
-#undef DEFINE_SHADOW_FRAME_OFFSET
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_thread.def b/tools/cpp-define-generator/offset_thread.def
deleted file mode 100644
index 6f94d38..0000000
--- a/tools/cpp-define-generator/offset_thread.def
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/enums.h"    // PointerSize
-#include "stack.h"         // art::ShadowFrame
-#endif
-
-#include "common.def"        // DEFINE_OFFSET_EXPR
-
-#define DEFINE_THREAD_OFFSET(field_name, method_name) \
-  DEFINE_OFFSET_EXPR(Thread, field_name, int32_t, art::Thread::method_name##Offset<art::kRuntimePointerSize>().Int32Value())
-
-//                   New macro suffix            Method Name (of the Offset method)
-DEFINE_THREAD_OFFSET(FLAGS,                      ThreadFlags)
-DEFINE_THREAD_OFFSET(ID,                         ThinLockId)
-DEFINE_THREAD_OFFSET(IS_GC_MARKING,              IsGcMarking)
-DEFINE_THREAD_OFFSET(CARD_TABLE,                 CardTable)
-
-// TODO: The rest of the offsets
-// are dependent on __SIZEOF_POINTER__
-
-#undef DEFINE_THREAD_OFFSET
-
-#include "common_undef.def"  // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
deleted file mode 100644
index 31587d8..0000000
--- a/tools/cpp-define-generator/offsets_all.def
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Includes every single offset file in art.
-// Useful for processing every single offset together.
-
-// Usage:
-// #define DEFINE_INCLUDE_DEPENDENCIES
-// #include "offsets_all.def"
-// to automatically include each def file's header dependencies.
-//
-// Afterwards,
-// #define DEFINE_EXPR(define_name, field_type, expr) ...
-// #include "offsets_all.def"
-// to process each offset however one wants.
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#define DEFINE_EXPR(define_name, field_type, expr)
-#endif
-
-#if !defined(DEFINE_EXPR)
-#error "Either DEFINE_INCLUDE_DEPENDENCIES or DEFINE_EXPR must be defined"
-#endif
-
-#include "constant_reference.def"
-#include "offset_runtime.def"
-// TODO: rest of THREAD_ offsets (depends on __SIZEOF__POINTER__).
-#include "offset_thread.def"
-// TODO: SHADOW_FRAME depends on __SIZEOF__POINTER__
-// #include "offset_shadow_frame.def"
-// TODO: MIRROR_OBJECT_HEADER_SIZE (depends on #ifdef read barrier)
-#include "offset_mirror_class.def"
-#include "offset_mirror_dex_cache.def"
-#include "offset_mirror_object.def"
-#include "constant_class.def"
-// TODO: MIRROR_*_ARRAY offsets (depends on header size)
-// TODO: MIRROR_STRING offsets (depends on header size)
-#include "offset_art_method.def"
-#include "constant_dexcache.def"
-#include "constant_card_table.def"
-#include "constant_heap.def"
-#include "constant_lockword.def"
-#include "constant_globals.def"
-#include "constant_rosalloc.def"
-#include "constant_thread.def"
-#include "constant_jit.def"
-
-// TODO: MIRROR_OBJECT_HEADER_SIZE #ifdef depends on read barriers
-// TODO: Array offsets (depends on MIRROR_OBJECT_HEADER_SIZE)
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#undef DEFINE_EXPR
-#undef DEFINE_INCLUDE_DEPENDENCIES
-#endif
-
-
diff --git a/tools/cpp-define-generator/presubmit-check-files-up-to-date b/tools/cpp-define-generator/presubmit-check-files-up-to-date
deleted file mode 100755
index 0301a3e..0000000
--- a/tools/cpp-define-generator/presubmit-check-files-up-to-date
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ---------------------------------------------------------------------------
-
-# Generates asm_support_gen.h into a temporary location.
-# Then verifies it is the same as our local stored copy.
-
-GEN_TOOL=cpp-define-generator-data
-
-if ! which "$GEN_TOOL"; then
-  if [[ -z $ANDROID_BUILD_TOP ]]; then
-    echo "ERROR: Can't find '$GEN_TOOL' in \$PATH. Perhaps try 'source build/envsetup.sh' ?" >&2
-  else
-    echo "ERROR: Can't find '$GEN_TOOL' in \$PATH. Perhaps try 'make $GEN_TOOL' ?" >&2
-  fi
-  exit 1
-fi
-
-#######################
-#######################
-
-PREUPLOAD_COMMIT_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
-BUILD_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
-
-function finish() {
-  # Delete temp files.
-  [[ -f "$PREUPLOAD_COMMIT_COPY" ]] && rm "$PREUPLOAD_COMMIT_COPY"
-  [[ -f "$BUILD_COPY" ]] && rm "$BUILD_COPY"
-}
-trap finish EXIT
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-ART_DIR="$( cd "$DIR/../.." && pwd )"
-ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
-
-# Repo upload hook runs inside of the top-level git directory.
-# If we run this script manually, be in the right place for git.
-cd "$ART_DIR"
-
-if [[ -z $PREUPLOAD_COMMIT ]]; then
-  echo "WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'"
-  PREUPLOAD_COMMIT=HEAD
-fi
-
-# Get version we are about to push into git.
-git show "$PREUPLOAD_COMMIT:$ASM_SUPPORT_GEN_CHECKED_IN_COPY" > "$PREUPLOAD_COMMIT_COPY" || exit 1
-# Get version that our build would have made.
-"$GEN_TOOL" > "$BUILD_COPY" || exit 1
-
-if ! diff "$PREUPLOAD_COMMIT_COPY" "$BUILD_COPY"; then
-  echo "asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
-  echo "             has diverged from the build copy." >&2
-  echo "             Please re-run the 'generate-asm-support' command to resync the header." >&2
-  exit 1
-fi
-
-# Success. Print nothing to avoid spamming users.
diff --git a/tools/cpp-define-generator/rosalloc.def b/tools/cpp-define-generator/rosalloc.def
new file mode 100644
index 0000000..eb8d8f2
--- /dev/null
+++ b/tools/cpp-define-generator/rosalloc.def
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "gc/allocator/rosalloc.h"
+#endif
+
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
+           art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32,
+           ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64,
+           ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
+           art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift)
+ASM_DEFINE(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
+           art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize)
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
+           art::gc::allocator::RosAlloc::RunFreeListHeadOffset())
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_OFFSET,
+           art::gc::allocator::RosAlloc::RunFreeListOffset())
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
+           art::gc::allocator::RosAlloc::RunFreeListSizeOffset())
+ASM_DEFINE(ROSALLOC_SLOT_NEXT_OFFSET,
+           art::gc::allocator::RosAlloc::RunSlotNextOffset())
diff --git a/tools/cpp-define-generator/runtime.def b/tools/cpp-define-generator/runtime.def
new file mode 100644
index 0000000..2a2e303
--- /dev/null
+++ b/tools/cpp-define-generator/runtime.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "runtime.h"
+#endif
+
+ASM_DEFINE(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveAllCalleeSaves))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForClinit))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverything))
+ASM_DEFINE(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsAndArgs))
+ASM_DEFINE(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET,
+           art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsOnly))
diff --git a/tools/cpp-define-generator/shadow_frame.def b/tools/cpp-define-generator/shadow_frame.def
new file mode 100644
index 0000000..10a309c
--- /dev/null
+++ b/tools/cpp-define-generator/shadow_frame.def
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "interpreter/shadow_frame.h"
+#endif
+
+ASM_DEFINE(SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET,
+           art::ShadowFrame::CachedHotnessCountdownOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET,
+           art::ShadowFrame::DexInstructionsOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_PC_OFFSET,
+           art::ShadowFrame::DexPCOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_PC_PTR_OFFSET,
+           art::ShadowFrame::DexPCPtrOffset())
+ASM_DEFINE(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET,
+           art::ShadowFrame::HotnessCountdownOffset())
+ASM_DEFINE(SHADOWFRAME_LINK_OFFSET,
+           art::ShadowFrame::LinkOffset())
+ASM_DEFINE(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
+           art::ShadowFrame::LockCountDataOffset())
+ASM_DEFINE(SHADOWFRAME_METHOD_OFFSET,
+           art::ShadowFrame::MethodOffset())
+ASM_DEFINE(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
+           art::ShadowFrame::NumberOfVRegsOffset())
+ASM_DEFINE(SHADOWFRAME_RESULT_REGISTER_OFFSET,
+           art::ShadowFrame::ResultRegisterOffset())
+ASM_DEFINE(SHADOWFRAME_VREGS_OFFSET,
+           art::ShadowFrame::VRegsOffset())
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
new file mode 100644
index 0000000..8c91dc8
--- /dev/null
+++ b/tools/cpp-define-generator/thread.def
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "thread.h"
+#endif
+
+ASM_DEFINE(THREAD_CARD_TABLE_OFFSET,
+           art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_CHECKPOINT_REQUEST,
+           art::kCheckpointRequest)
+ASM_DEFINE(THREAD_CURRENT_IBASE_OFFSET,
+           art::Thread::MterpCurrentIBaseOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_EMPTY_CHECKPOINT_REQUEST,
+           art::kEmptyCheckpointRequest)
+ASM_DEFINE(THREAD_EXCEPTION_OFFSET,
+           art::Thread::ExceptionOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_FLAGS_OFFSET,
+           art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ID_OFFSET,
+           art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_OFFSET,
+           art::Thread::InterpreterCacheOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_LOG2,
+           art::Thread::InterpreterCacheSizeLog2())
+ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
+           art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
+           art::Thread::ThreadLocalAllocStackEndOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
+           art::Thread::ThreadLocalAllocStackTopOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_END_OFFSET,
+           art::Thread::ThreadLocalEndOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_OBJECTS_OFFSET,
+           art::Thread::ThreadLocalObjectsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_POS_OFFSET,
+           art::Thread::ThreadLocalPosOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ROSALLOC_RUNS_OFFSET,
+           art::Thread::RosAllocRunsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_SELF_OFFSET,
+           art::Thread::SelfOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST,
+           art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
+ASM_DEFINE(THREAD_SUSPEND_REQUEST,
+           art::kSuspendRequest)
+ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
+           art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
+           art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
diff --git a/tools/cpp-define-generator/verify-asm-support b/tools/cpp-define-generator/verify-asm-support
deleted file mode 100755
index 745b115..0000000
--- a/tools/cpp-define-generator/verify-asm-support
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ---------------------------------------------------------------------------
-
-# Generates asm_support_gen.h into the $OUT directory in the build.
-# Then verifies that it is the same as in runtime/generated/asm_support_gen.h
-
-# Validates that art/runtime/generated/asm_support_gen.h
-# - This must be run after a build since it uses cpp-define-generator-data
-
-# Path to asm_support_gen.h that we check into our git repository.
-ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
-# Instead of producing an error if checked-in copy differs from the generated version,
-# overwrite the local checked-in copy instead.
-OVERWRITE_CHECKED_IN_COPY_IF_CHANGED="n"
-
-#######################
-#######################
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-ART_DIR="$( cd "$DIR/../.." && pwd )"
-ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY="$ART_DIR/runtime/generated/asm_support_gen.h"
-
-# Sanity check that we haven't moved the file around.
-# If we did, perhaps the above constant should be updated.
-if ! [[ -f "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" ]]; then
-  echo "ERROR: Missing asm_support_gen.h, expected to be in '$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY'" >&2
-  exit 1
-fi
-
-# The absolute path to cpp-define-generator is in $1
-# Generate the file as part of the build into the out location specified by $2.
-
-# Compare that the generated file matches our golden copy that's checked into git.
-# If not, it is a fatal error and the user needs to run 'generate-asm-support' to rebuild.
-
-if [[ $# -lt 2 ]]; then
-  echo "Usage: $0 [--quiet] [--presubmit] <path-to-cpp-define-generator-data-binary> <output-file>'" >&2
-  exit 1
-fi
-
-# Supress 'chatty' messages during the build.
-# If anything is printed in a success case then
-# the main Android build can't reuse the same line for
-# showing multiple commands being executed.
-QUIET=false
-if [[ "$1" == "--quiet" ]]; then
-  QUIET=true
-  shift
-fi
-
-CPP_DEFINE_GENERATOR_TOOL="$1"
-OUTPUT_FILE="$2"
-
-function pecho() {
-  if ! $QUIET; then
-    echo "$@"
-  fi
-}
-
-# Generate the header. Print the command we're running to console for readability.
-pecho "cpp-define-generator-data > \"$OUTPUT_FILE\""
-"$CPP_DEFINE_GENERATOR_TOOL" > "$OUTPUT_FILE"
-retval="$?"
-
-if [[ $retval -ne 0 ]]; then
-  echo "verify-asm-support: FATAL: Error while running cpp-define-generator-data" >&2
-  exit $retval
-fi
-
-if ! diff "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" "$OUTPUT_FILE"; then
-
-  if [[ $OVERWRITE_CHECKED_IN_COPY_IF_CHANGED == "y" ]]; then
-    cp "$OUTPUT_FILE" "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY"
-    echo "verify-asm-support: OK: Overwrote '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' with build copy."
-    echo "                        Please 'git add $ASM_SUPPORT_GEN_CHECKED_IN_COPY'."
-  else
-    echo "---------------------------------------------------------------------------------------------" >&2
-    echo "verify-asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
-    echo "                    has diverged from the build copy." >&2
-    echo "                    Please re-run the 'generate-asm-support' command to resync the header." >&2
-    [[ -f "$OUTPUT_FILE" ]] && rm "$OUTPUT_FILE"
-    exit 1
-  fi
-fi
-
-pecho "verify-asm-support: SUCCESS. Built '$OUTPUT_FILE' which matches our checked in copy."
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index a85bf56..a232a1b 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -24,11 +24,6 @@
         "dexanalyze_experiments.cc",
         "dexanalyze_strings.cc",
     ],
-    target: {
-        android: {
-            shared_libs: ["libcutils"],
-        },
-    },
     header_libs: [
         "art_cmdlineparser_headers",
     ],
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 659a940..ae88f37 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -118,7 +118,7 @@
         ProcessCodeItem(*dex_file,
                         method.GetInstructionsAndData(),
                         accessor.GetClassIdx(),
-                        /*count_types*/ true,
+                        /*count_types=*/ true,
                         types);
       }
     }
@@ -143,7 +143,7 @@
         ProcessCodeItem(*dex_file,
                         data,
                         accessor.GetClassIdx(),
-                        /*count_types*/ false,
+                        /*count_types=*/ false,
                         types);
         std::vector<uint8_t> buffer = std::move(buffer_);
         buffer_.clear();
@@ -360,7 +360,7 @@
       case Instruction::INVOKE_INTERFACE:
       case Instruction::INVOKE_SUPER: {
         const uint32_t method_idx = DexMethodIndex(inst.Inst());
-        const DexFile::MethodId& method = dex_file.GetMethodId(method_idx);
+        const dex::MethodId& method = dex_file.GetMethodId(method_idx);
         const dex::TypeIndex receiver_type = method.class_idx_;
         if (Enabled(kExperimentInvoke)) {
           if (count_types) {
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index 015801f..da4249d 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -24,6 +24,7 @@
 #include "base/safe_map.h"
 #include "dexanalyze_experiments.h"
 #include "dex/code_item_accessors.h"
+#include "dex/dex_file_types.h"
 
 namespace art {
 namespace dexanalyze {
diff --git a/tools/dexanalyze/dexanalyze_strings.cc b/tools/dexanalyze/dexanalyze_strings.cc
index 863e4ee..dcadb59 100644
--- a/tools/dexanalyze/dexanalyze_strings.cc
+++ b/tools/dexanalyze/dexanalyze_strings.cc
@@ -21,6 +21,7 @@
 #include <iostream>
 #include <queue>
 
+#include "base/time_utils.h"
 #include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_instruction-inl.h"
@@ -34,10 +35,156 @@
 static const size_t kPrefixConstantCost = 4;
 static const size_t kPrefixIndexCost = 2;
 
+class PrefixDictionary {
+ public:
+  // Add prefix data and return the offset to the start of the added data.
+  size_t AddPrefixData(const uint8_t* data, size_t len) {
+    const size_t offset = prefix_data_.size();
+    prefix_data_.insert(prefix_data_.end(), data, data + len);
+    return offset;
+  }
+
+  static constexpr size_t kLengthBits = 8;
+  static constexpr size_t kLengthMask = (1u << kLengthBits) - 1;
+
+  // Return the prefix offset and length.
+  ALWAYS_INLINE void GetOffset(uint32_t prefix_index, uint32_t* offset, uint32_t* length) const {
+    CHECK_LT(prefix_index, offsets_.size());
+    const uint32_t data = offsets_[prefix_index];
+    *length = data & kLengthMask;
+    *offset = data >> kLengthBits;
+  }
+
+  uint32_t AddOffset(uint32_t offset, uint32_t length) {
+    CHECK_LE(length, kLengthMask);
+    offsets_.push_back((offset << kLengthBits) | length);
+    return offsets_.size() - 1;
+  }
+
+ public:
+  std::vector<uint32_t> offsets_;
+  std::vector<uint8_t> prefix_data_;
+};
+
+class PrefixStrings {
+ public:
+  class Builder {
+   public:
+    explicit Builder(PrefixStrings* output) : output_(output) {}
+    void Build(const std::vector<std::string>& strings);
+
+   private:
+    PrefixStrings* const output_;
+  };
+
+  // Return the string index that was added.
+  size_t AddString(uint16_t prefix, const std::string& str) {
+    const size_t string_offset = chars_.size();
+    chars_.push_back(static_cast<uint8_t>(prefix >> 8));
+    chars_.push_back(static_cast<uint8_t>(prefix >> 0));
+    EncodeUnsignedLeb128(&chars_, str.length());
+    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&str[0]);
+    chars_.insert(chars_.end(), ptr, ptr + str.length());
+    string_offsets_.push_back(string_offset);
+    return string_offsets_.size() - 1;
+  }
+
+  std::string GetString(uint32_t string_idx) const {
+    const size_t offset = string_offsets_[string_idx];
+    const uint8_t* suffix_data = &chars_[offset];
+    uint16_t prefix_idx = (static_cast<uint16_t>(suffix_data[0]) << 8) +
+        suffix_data[1];
+    suffix_data += 2;
+    uint32_t prefix_offset;
+    uint32_t prefix_len;
+    dictionary_.GetOffset(prefix_idx, &prefix_offset, &prefix_len);
+    const uint8_t* prefix_data = &dictionary_.prefix_data_[prefix_offset];
+    std::string ret(prefix_data, prefix_data + prefix_len);
+    uint32_t suffix_len = DecodeUnsignedLeb128(&suffix_data);
+    ret.insert(ret.end(), suffix_data, suffix_data + suffix_len);
+    return ret;
+  }
+
+  ALWAYS_INLINE bool Equal(uint32_t string_idx, const uint8_t* data, size_t len) const {
+    const size_t offset = string_offsets_[string_idx];
+    const uint8_t* suffix_data = &chars_[offset];
+    uint16_t prefix_idx = (static_cast<uint16_t>(suffix_data[0]) << 8) +
+        suffix_data[1];
+    suffix_data += 2;
+    uint32_t prefix_offset;
+    uint32_t prefix_len;
+    dictionary_.GetOffset(prefix_idx, &prefix_offset, &prefix_len);
+    uint32_t suffix_len = DecodeUnsignedLeb128(&suffix_data);
+    if (prefix_len + suffix_len != len) {
+      return false;
+    }
+    const uint8_t* prefix_data = &dictionary_.prefix_data_[prefix_offset];
+    if ((true)) {
+      return memcmp(prefix_data, data, prefix_len) == 0u &&
+          memcmp(suffix_data, data + prefix_len, len - prefix_len) == 0u;
+    } else {
+      len -= prefix_len;
+      while (prefix_len != 0u) {
+        if (*prefix_data++ != *data++) {
+          return false;
+        }
+        --prefix_len;
+      }
+      while (len != 0u) {
+        if (*suffix_data++ != *data++) {
+          return false;
+        }
+        --len;
+      }
+      return true;
+    }
+  }
+
+ public:
+  PrefixDictionary dictionary_;
+  std::vector<uint8_t> chars_;
+  std::vector<uint32_t> string_offsets_;
+};
+
+// Normal non-prefix strings.
+class NormalStrings {
+ public:
+  // Return the string index that was added.
+  size_t AddString(const std::string& str) {
+    const size_t string_offset = chars_.size();
+    EncodeUnsignedLeb128(&chars_, str.length());
+    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&str[0]);
+    chars_.insert(chars_.end(), ptr, ptr + str.length());
+    string_offsets_.push_back(string_offset);
+    return string_offsets_.size() - 1;
+  }
+
+  std::string GetString(uint32_t string_idx) const {
+    const size_t offset = string_offsets_[string_idx];
+    const uint8_t* data = &chars_[offset];
+    uint32_t len = DecodeUnsignedLeb128(&data);
+    return std::string(data, data + len);
+  }
+
+  ALWAYS_INLINE bool Equal(uint32_t string_idx, const uint8_t* data, size_t len) const {
+    const size_t offset = string_offsets_[string_idx];
+    const uint8_t* str_data = &chars_[offset];
+    uint32_t str_len = DecodeUnsignedLeb128(&str_data);
+    if (str_len != len) {
+      return false;
+    }
+    return memcmp(data, str_data, len) == 0u;
+  }
+
+ public:
+  std::vector<uint8_t> chars_;
+  std::vector<uint32_t> string_offsets_;
+};
+
 // Node value = (distance from root) * (occurrences - 1).
 class MatchTrie {
  public:
-  void Add(const std::string& str) {
+  MatchTrie* Add(const std::string& str) {
     MatchTrie* node = this;
     size_t depth = 0u;
     for (uint8_t c : str) {
@@ -54,33 +201,28 @@
       }
       ++node->count_;
     }
-    node->is_end_ = true;
+    return node;
   }
 
   // Returns the length of the longest prefix and if it's a leaf node.
-  std::pair<size_t, bool> LongestPrefix(const std::string& str) const {
-    const MatchTrie* node = this;
-    const MatchTrie* best_node = this;
-    size_t depth = 0u;
-    size_t best_depth = 0u;
+  MatchTrie* LongestPrefix(const std::string& str) {
+    MatchTrie* node = this;
     for (uint8_t c : str) {
       if (node->nodes_[c] == nullptr) {
         break;
       }
       node = node->nodes_[c].get();
-      ++depth;
-      if (node->is_end_) {
-        best_depth = depth;
-        best_node = node;
-      }
     }
-    bool is_leaf = true;
-    for (const std::unique_ptr<MatchTrie>& cur_node : best_node->nodes_) {
+    return node;
+  }
+
+  bool IsLeaf() const {
+    for (const std::unique_ptr<MatchTrie>& cur_node : nodes_) {
       if (cur_node != nullptr) {
-        is_leaf = false;
+        return false;
       }
     }
-    return {best_depth, is_leaf};
+    return true;
   }
 
   int32_t Savings() const {
@@ -134,7 +276,7 @@
           ++num_childs;
         }
       }
-      if (num_childs > 1u || elem->is_end_) {
+      if (num_childs > 1u || elem->value_ != 0u) {
         queue.emplace(elem->Savings(), elem);
       }
     }
@@ -166,30 +308,117 @@
       if (pair.first <= 0) {
         continue;
       }
-      std::vector<uint8_t> chars;
-      for (MatchTrie* cur = pair.second; cur != this; cur = cur->parent_) {
-        chars.push_back(cur->incoming_);
-      }
-      ret.push_back(std::string(chars.rbegin(), chars.rend()));
-      // LOG(INFO) << pair.second->Savings() << " : " << ret.back();
+      ret.push_back(pair.second->GetString());
     }
     return ret;
   }
 
+  std::string GetString() const {
+    std::vector<uint8_t> chars;
+    for (const MatchTrie* cur = this; cur->parent_ != nullptr; cur = cur->parent_) {
+      chars.push_back(cur->incoming_);
+    }
+    return std::string(chars.rbegin(), chars.rend());
+  }
+
   std::unique_ptr<MatchTrie> nodes_[256];
   MatchTrie* parent_ = nullptr;
   uint32_t count_ = 0u;
-  int32_t depth_ = 0u;
+  uint32_t depth_ = 0u;
   int32_t savings_ = 0u;
   uint8_t incoming_ = 0u;
-  // If the current node is the end of a possible prefix.
-  bool is_end_ = false;
+  // Value of the current node, non zero if the node is chosen.
+  uint32_t value_ = 0u;
   // If the current node is chosen to be a used prefix.
   bool chosen_ = false;
   // If the current node is a prefix of a longer chosen prefix.
   uint32_t chosen_suffix_count_ = 0u;
 };
 
+void PrefixStrings::Builder::Build(const std::vector<std::string>& strings) {
+  std::unique_ptr<MatchTrie> prefixe_trie(new MatchTrie());
+  for (size_t i = 0; i < strings.size(); ++i) {
+    size_t len = 0u;
+    if (i > 0u) {
+      CHECK_GT(strings[i], strings[i - 1]);
+      len = std::max(len, PrefixLen(strings[i], strings[i - 1]));
+    }
+    if (i < strings.size() - 1) {
+      len = std::max(len, PrefixLen(strings[i], strings[i + 1]));
+    }
+    len = std::min(len, kMaxPrefixLen);
+    if (len >= kMinPrefixLen) {
+      prefixe_trie->Add(strings[i].substr(0, len))->value_ = 1u;
+    }
+  }
+
+  // Build prefixes.
+  {
+    static constexpr size_t kPrefixBits = 15;
+    std::vector<std::string> prefixes(prefixe_trie->ExtractPrefixes(1 << kPrefixBits));
+    // Add longest prefixes first so that subprefixes can share data.
+    std::sort(prefixes.begin(), prefixes.end(), [](const std::string& a, const std::string& b) {
+      return a.length() > b.length();
+    });
+    prefixe_trie.reset();
+    prefixe_trie.reset(new MatchTrie());
+    uint32_t prefix_idx = 0u;
+    CHECK_EQ(output_->dictionary_.AddOffset(0u, 0u), prefix_idx++);
+    for (const std::string& str : prefixes) {
+      uint32_t prefix_offset = 0u;
+      MatchTrie* node = prefixe_trie->LongestPrefix(str);
+      if (node != nullptr && node->depth_ == str.length() && node->value_ != 0u) {
+        CHECK_EQ(node->GetString(), str);
+        uint32_t existing_len = 0u;
+        output_->dictionary_.GetOffset(node->value_, &prefix_offset, &existing_len);
+        // Make sure to register the current node.
+        prefixe_trie->Add(str)->value_ = prefix_idx;
+      } else {
+        auto add_str = [&](const std::string& s) {
+          node = prefixe_trie->Add(s);
+          node->value_ = prefix_idx;
+          while (node != nullptr) {
+            node->value_ = prefix_idx;
+            node = node->parent_;
+          }
+        };
+        static constexpr size_t kNumSubstrings = 1u;
+        // Increasing kNumSubstrings provides savings, since it lets common substrings (not
+        // only prefixes) share data. The downside is that it's slow.
+        for (size_t i = 0; i < std::min(str.length(), kNumSubstrings); ++i) {
+          add_str(str.substr(i));
+        }
+        prefix_offset = output_->dictionary_.AddPrefixData(
+            reinterpret_cast<const uint8_t*>(&str[0]),
+            str.length());
+      }
+      // TODO: Validate the prefix offset.
+      CHECK_EQ(output_->dictionary_.AddOffset(prefix_offset, str.length()), prefix_idx);
+      ++prefix_idx;
+    }
+  }
+
+  // Add strings to the dictionary.
+  for (const std::string& str : strings) {
+    MatchTrie* node = prefixe_trie->LongestPrefix(str);
+    uint32_t prefix_idx = 0u;
+    uint32_t best_length = 0u;
+    while (node != nullptr) {
+      uint32_t offset = 0u;
+      uint32_t length = 0u;
+      output_->dictionary_.GetOffset(node->value_, &offset, &length);
+      if (node->depth_ == length) {
+        prefix_idx = node->value_;
+        best_length = node->depth_;
+        break;
+        // This node is exactly the prefix we want, so stop searching.
+      }
+      node = node->parent_;
+    }
+    output_->AddString(prefix_idx, str.substr(best_length));
+  }
+}
+
 void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
   std::set<std::string> unique_strings;
   // Accumulate the strings.
@@ -212,18 +441,13 @@
       unique_strings.insert(data);
     }
   }
-  // Unique strings only since we want to exclude savings from multidex duplication.
-  ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()), 1);
+  // Unique strings only since we want to exclude savings from multi-dex duplication.
+  ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()));
 }
 
-void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings, size_t iterations) {
-  if (iterations == 0u) {
-    return;
-  }
+void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings) {
   // Calculate total shared prefix.
-  std::vector<size_t> shared_len;
-  prefixes_.clear();
-  std::unique_ptr<MatchTrie> prefix_construct(new MatchTrie());
+  size_t prefix_index_cost_ = 0u;
   for (size_t i = 0; i < strings.size(); ++i) {
     size_t best_len = 0;
     if (i > 0) {
@@ -233,131 +457,117 @@
       best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
     }
     best_len = std::min(best_len, kMaxPrefixLen);
-    std::string prefix;
     if (best_len >= kMinPrefixLen) {
-      prefix = strings[i].substr(0, best_len);
-      prefix_construct->Add(prefix);
-      ++prefixes_[prefix];
       total_shared_prefix_bytes_ += best_len;
     }
-    total_prefix_index_cost_ += kPrefixIndexCost;
-  }
-
-  static constexpr size_t kPrefixBits = 15;
-  static constexpr size_t kShortLen = (1u << (15 - kPrefixBits)) - 1;
-  std::unique_ptr<MatchTrie> prefix_trie(new MatchTrie());
-  static constexpr bool kUseGreedyTrie = true;
-  if (kUseGreedyTrie) {
-    std::vector<std::string> prefixes(prefix_construct->ExtractPrefixes(1 << kPrefixBits));
-    for (auto&& str : prefixes) {
-      prefix_trie->Add(str);
-    }
-  } else {
-    // Optimize the result by moving long prefixes to shorter ones if it causes additional savings.
-    while (true) {
-      bool have_savings = false;
-      auto it = prefixes_.begin();
-      std::vector<std::string> longest;
-      for (const auto& pair : prefixes_) {
-        longest.push_back(pair.first);
-      }
-      std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
-        return a.length() > b.length();
-      });
-      // Do longest first since this provides the best results.
-      for (const std::string& s : longest) {
-        it = prefixes_.find(s);
-        CHECK(it != prefixes_.end());
-        const std::string& prefix = it->first;
-        int64_t best_savings = 0u;
-        int64_t best_len = -1;
-        for (int64_t len = prefix.length() - 1; len >= 0; --len) {
-          auto found = prefixes_.find(prefix.substr(0, len));
-          if (len != 0 && found == prefixes_.end()) {
-            continue;
-          }
-          // Calculate savings from downgrading the prefix.
-          int64_t savings = kPrefixConstantCost + prefix.length() -
-              (prefix.length() - len) * it->second;
-          if (savings > best_savings) {
-            best_savings = savings;
-            best_len = len;
-            break;
-          }
-        }
-        if (best_len != -1) {
-          prefixes_[prefix.substr(0, best_len)] += it->second;
-          it = prefixes_.erase(it);
-          optimization_savings_ += best_savings;
-          have_savings = true;
-        } else {
-          ++it;
-        }
-      }
-      if (!have_savings) {
-        break;
-      }
-    }
-    for (auto&& pair : prefixes_) {
-      prefix_trie->Add(pair.first);
-    }
-  }
-
-  // Count longest prefixes.
-  std::set<std::string> used_prefixes;
-  std::vector<std::string> suffix;
-  for (const std::string& str : strings) {
-    auto pair = prefix_trie->LongestPrefix(str);
-    const size_t len = pair.first;
-    if (len >= kMinPrefixLen) {
-      ++strings_used_prefixed_;
-      total_prefix_savings_ += len;
-      used_prefixes.insert(str.substr(0, len));
-    }
-    suffix.push_back(str.substr(len));
-    if (suffix.back().size() < kShortLen) {
+    prefix_index_cost_ += kPrefixIndexCost;
+    if (strings[i].length() < 64) {
       ++short_strings_;
     } else {
       ++long_strings_;
     }
   }
-  std::sort(suffix.begin(), suffix.end());
-  for (const std::string& prefix : used_prefixes) {
-    // 4 bytes for an offset, one for length.
-    auto pair = prefix_trie->LongestPrefix(prefix);
-    CHECK_EQ(pair.first, prefix.length());
-    if (pair.second) {
-      // Only need to add to dictionary if it's a leaf, otherwise we can reuse string data of the
-      // other prefix.
-      total_prefix_dict_ += prefix.size();
-    }
-    total_prefix_table_ += kPrefixConstantCost;
+  total_prefix_index_cost_ += prefix_index_cost_;
+
+  PrefixStrings prefix_strings;
+  {
+    PrefixStrings::Builder prefix_builder(&prefix_strings);
+    prefix_builder.Build(strings);
   }
-  ProcessStrings(suffix, iterations - 1);
+  Benchmark(prefix_strings, strings, &prefix_timings_);
+  const size_t num_prefixes = prefix_strings.dictionary_.offsets_.size();
+  total_num_prefixes_ += num_prefixes;
+  total_prefix_table_ += num_prefixes * sizeof(prefix_strings.dictionary_.offsets_[0]);
+  total_prefix_dict_ += prefix_strings.dictionary_.prefix_data_.size();
+
+  {
+    NormalStrings normal_strings;
+    for (const std::string& s : strings) {
+      normal_strings.AddString(s);
+    }
+    const uint64_t unique_string_data_bytes = normal_strings.chars_.size();
+    total_unique_string_data_bytes_ += unique_string_data_bytes;
+    total_prefix_savings_ += unique_string_data_bytes - prefix_strings.chars_.size() +
+        prefix_index_cost_;
+    Benchmark(normal_strings, strings, &normal_timings_);
+  }
+}
+
+template <typename Strings>
+void AnalyzeStrings::Benchmark(const Strings& strings,
+                               const std::vector<std::string>& reference,
+                               StringTimings* timings) {
+  const size_t kIterations = 100;
+  timings->num_comparisons_ += reference.size() * kIterations;
+
+  uint64_t start = NanoTime();
+  for (size_t j = 0; j < kIterations; ++j) {
+    for (size_t i = 0; i < reference.size(); ++i) {
+      CHECK(strings.Equal(
+          i,
+          reinterpret_cast<const uint8_t*>(&reference[i][0]),
+          reference[i].length()))
+          << i << ": " << strings.GetString(i) << " vs " << reference[i];
+    }
+  }
+  timings->time_equal_comparisons_ += NanoTime() - start;
+
+  start = NanoTime();
+  for (size_t j = 0; j < kIterations; ++j) {
+    size_t count = 0u;
+    for (size_t i = 0; i < reference.size(); ++i) {
+      count += strings.Equal(
+          reference.size() - 1 - i,
+          reinterpret_cast<const uint8_t*>(&reference[i][0]),
+          reference[i].length());
+    }
+    CHECK_LT(count, 2u);
+  }
+  timings->time_non_equal_comparisons_ += NanoTime() - start;
+}
+
+template void AnalyzeStrings::Benchmark(const PrefixStrings&,
+                                        const std::vector<std::string>&,
+                                        StringTimings* timings);
+template void AnalyzeStrings::Benchmark(const NormalStrings&,
+                                        const std::vector<std::string>&,
+                                        StringTimings* timings);
+
+void StringTimings::Dump(std::ostream& os) const {
+  const double comparisons = static_cast<double>(num_comparisons_);
+  os << "Compare equal " << static_cast<double>(time_equal_comparisons_) / comparisons << "\n";
+  os << "Compare not equal " << static_cast<double>(time_non_equal_comparisons_) / comparisons << "\n";
 }
 
 void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
   os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
+  os << "Total unique string data bytes "
+     << Percent(total_unique_string_data_bytes_, total_size) << "\n";
   os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
   os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
 
+  os << "Prefix string timings\n";
+  prefix_timings_.Dump(os);
+  os << "Normal string timings\n";
+  normal_timings_.Dump(os);
+
   // Prefix based strings.
   os << "Total shared prefix bytes " << Percent(total_shared_prefix_bytes_, total_size) << "\n";
   os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
   os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
   os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
-  int64_t net_savings = total_prefix_savings_ + short_strings_;
+  int64_t net_savings = total_prefix_savings_;
   net_savings -= total_prefix_dict_;
   net_savings -= total_prefix_table_;
   net_savings -= total_prefix_index_cost_;
   os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
-  os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
+  os << "Prefix base savings " << Percent(total_prefix_savings_, total_size) << "\n";
   os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
   os << "Strings using prefix "
      << Percent(strings_used_prefixed_, total_prefix_index_cost_ / kPrefixIndexCost) << "\n";
   os << "Short strings " << Percent(short_strings_, short_strings_ + long_strings_) << "\n";
   if (verbose_level_ >= VerboseLevel::kEverything) {
-    std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
+    std::vector<std::pair<std::string, size_t>> pairs;  // (prefixes_.begin(), prefixes_.end());
     // Sort lexicographically.
     std::sort(pairs.begin(), pairs.end());
     for (const auto& pair : pairs) {
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
index 32702a6..88ea467 100644
--- a/tools/dexanalyze/dexanalyze_strings.h
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -18,9 +18,10 @@
 #define ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
 
 #include <array>
-#include <vector>
 #include <map>
+#include <vector>
 
+#include "base/leb128.h"
 #include "base/safe_map.h"
 #include "dexanalyze_experiments.h"
 #include "dex/code_item_accessors.h"
@@ -29,6 +30,15 @@
 namespace art {
 namespace dexanalyze {
 
+class StringTimings {
+ public:
+  void Dump(std::ostream& os) const;
+
+  uint64_t time_equal_comparisons_ = 0u;
+  uint64_t time_non_equal_comparisons_ = 0u;
+  uint64_t num_comparisons_ = 0u;
+};
+
 // Analyze string data and strings accessed from code.
 class AnalyzeStrings : public Experiment {
  public:
@@ -36,22 +46,26 @@
   void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
-  void ProcessStrings(const std::vector<std::string>& strings, size_t iterations);
+  void ProcessStrings(const std::vector<std::string>& strings);
+  template <typename Strings> void Benchmark(const Strings& strings,
+                                             const std::vector<std::string>& reference,
+                                             StringTimings* timings);
 
+  StringTimings prefix_timings_;
+  StringTimings normal_timings_;
   int64_t wide_string_bytes_ = 0u;
   int64_t ascii_string_bytes_ = 0u;
   int64_t string_data_bytes_ = 0u;
+  int64_t total_unique_string_data_bytes_ = 0u;
   int64_t total_shared_prefix_bytes_ = 0u;
   int64_t total_prefix_savings_ = 0u;
   int64_t total_prefix_dict_ = 0u;
   int64_t total_prefix_table_ = 0u;
   int64_t total_prefix_index_cost_ = 0u;
   int64_t total_num_prefixes_ = 0u;
-  int64_t optimization_savings_ = 0u;
   int64_t strings_used_prefixed_ = 0u;
   int64_t short_strings_ = 0u;
   int64_t long_strings_ = 0u;
-  std::unordered_map<std::string, size_t> prefixes_;
 };
 
 }  // namespace dexanalyze
diff --git a/tools/dexanalyze/dexanalyze_test.cc b/tools/dexanalyze/dexanalyze_test.cc
index 96be3f9..c6648c0 100644
--- a/tools/dexanalyze/dexanalyze_test.cc
+++ b/tools/dexanalyze/dexanalyze_test.cc
@@ -37,23 +37,23 @@
 };
 
 TEST_F(DexAnalyzeTest, NoInputFileGiven) {
-  DexAnalyzeExec({ "-a" }, /*expect_success*/ false);
+  DexAnalyzeExec({ "-a" }, /*expect_success=*/ false);
 }
 
 TEST_F(DexAnalyzeTest, CantOpenInput) {
-  DexAnalyzeExec({ "-a", "/non/existent/path" }, /*expect_success*/ false);
+  DexAnalyzeExec({ "-a", "/non/existent/path" }, /*expect_success=*/ false);
 }
 
 TEST_F(DexAnalyzeTest, TestAnalyzeMultidex) {
-  DexAnalyzeExec({ "-a", GetTestDexFileName("MultiDex") }, /*expect_success*/ true);
+  DexAnalyzeExec({ "-a", GetTestDexFileName("MultiDex") }, /*expect_success=*/ true);
 }
 
 TEST_F(DexAnalyzeTest, TestAnalizeCoreDex) {
-  DexAnalyzeExec({ "-a", GetLibCoreDexFileNames()[0] }, /*expect_success*/ true);
+  DexAnalyzeExec({ "-a", GetLibCoreDexFileNames()[0] }, /*expect_success=*/ true);
 }
 
 TEST_F(DexAnalyzeTest, TestInvalidArg) {
-  DexAnalyzeExec({ "-invalid-option" }, /*expect_success*/ false);
+  DexAnalyzeExec({ "-invalid-option" }, /*expect_success=*/ false);
 }
 
 }  // namespace art
diff --git a/tools/dist_linux_bionic.sh b/tools/dist_linux_bionic.sh
new file mode 100755
index 0000000..4c7ba1c
--- /dev/null
+++ b/tools/dist_linux_bionic.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Builds the given targets using linux-bionic and moves the output files to the
+# DIST_DIR. Takes normal make arguments.
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+  pushd .
+else
+  pushd $ANDROID_BUILD_TOP
+fi
+
+if [[ -z $DIST_DIR ]]; then
+  echo "DIST_DIR must be set!"
+  exit 1
+fi
+
+if [ ! -d art ]; then
+  echo "Script needs to be run at the root of the android tree"
+  exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+out_dir=$(get_build_var OUT_DIR)
+
+./art/tools/build_linux_bionic.sh $@
+
+mkdir -p $DIST_DIR
+cp -R ${out_dir}/soong/host/* $DIST_DIR/
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index 9b6ff98..38e5a99 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -10,47 +10,47 @@
   description: "Test fails due to unexpectedly getting the thread-groups of zombie threads",
   result: EXEC_FAILED,
   bug: 66906414,
-  name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
+  name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference_ThreadGroup002Test#testThreadGroup002"
 },
 {
   description: "Test fails due to static values not being set correctly.",
   result: EXEC_FAILED,
   bug: 66905894,
-  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
+  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues006Test#testGetValues006"
 },
 /* TODO Categorize these failures more. */
 {
   description: "Tests that fail on both ART and RI. These tests are likely incorrect",
   result: EXEC_FAILED,
   bug: 66906734,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference.SetValues003Test#testSetValues003_InvalidIndex",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod002",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod003",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.NewInstanceTest#testNewInstance002",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.SetValues002Test#testSetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.Events.ClassPrepare002Test#testClassPrepareCausedByDebugger",
-           "org.apache.harmony.jpda.tests.jdwp.Events.ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.DisableCollectionTest#testDisableCollection_null",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_invalid",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_null",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.GetValues002Test#testGetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValues003Test#testSetValues003",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValuesTest#testSetValues001",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.FieldsWithGenericTest#testFieldsWithGeneric001",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues002Test#testGetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues004Test#testGetValues004",
-           "org.apache.harmony.jpda.tests.jdwp.StringReference.ValueTest#testStringReferenceValueTest001_NullString",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ChildrenTest#testChildren_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference_SetValues003Test#testSetValues003_InvalidIndex",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod002",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod003",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_NewInstanceTest#testNewInstance002",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_SetValues002Test#testSetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.Events_ClassPrepare002Test#testClassPrepareCausedByDebugger",
+           "org.apache.harmony.jpda.tests.jdwp.Events_ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_DisableCollectionTest#testDisableCollection_null",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_invalid",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_null",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_GetValues002Test#testGetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValues003Test#testSetValues003",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValuesTest#testSetValues001",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_FieldsWithGenericTest#testFieldsWithGeneric001",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues002Test#testGetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues004Test#testGetValues004",
+           "org.apache.harmony.jpda.tests.jdwp.StringReference_ValueTest#testStringReferenceValueTest001_NullString",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ChildrenTest#testChildren_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_NameTest#testName001_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ParentTest#testParent_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_CapabilitiesNewTest#testCapabilitiesNew001" ]
 },
 {
   description: "Test times out on fugu-debug",
   result: EXEC_FAILED,
   bug: 70459916,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug",
-           "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest002#testVMDebug" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest#testVMDebug",
+           "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest002#testVMDebug" ]
 }
 ]
diff --git a/tools/field-null-percent/check-null-fields.py b/tools/field-null-percent/check-null-fields.py
new file mode 100755
index 0000000..c11d51a
--- /dev/null
+++ b/tools/field-null-percent/check-null-fields.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Retrieves the counts of how many objects have a particular field null on all running processes.
+
+Prints a json map from pid -> (log-tag, field-name, null-count, total-count).
+"""
+
+
+import adb
+import argparse
+import concurrent.futures
+import itertools
+import json
+import logging
+import os
+import os.path
+import signal
+import subprocess
+import time
+
+def main():
+  parser = argparse.ArgumentParser(description="Get counts of null fields from a device.")
+  parser.add_argument("-S", "--serial", metavar="SERIAL", type=str,
+                      required=False,
+                      default=os.environ.get("ANDROID_SERIAL", None),
+                      help="Android serial to use. Defaults to ANDROID_SERIAL")
+  parser.add_argument("-p", "--pid", required=False,
+                      default=[], action="append",
+                      help="Specific pids to check. By default checks all running dalvik processes")
+  has_out = "OUT" in os.environ
+  def_32 = os.path.join(os.environ.get("OUT", ""), "system", "lib", "libfieldnull.so")
+  def_64 = os.path.join(os.environ.get("OUT", ""), "system", "lib64", "libfieldnull.so")
+  has_32 = has_out and os.path.exists(def_32)
+  has_64 = has_out and os.path.exists(def_64)
+  def pushable_lib(name):
+    if os.path.isfile(name):
+      return name
+    else:
+      raise argparse.ArgumentTypeError(name + " is not a file!")
+  parser.add_argument('--lib32', type=pushable_lib,
+                      required=not has_32,
+                      action='store',
+                      default=def_32,
+                      help="Location of 32 bit agent to push")
+  parser.add_argument('--lib64', type=pushable_lib,
+                      required=not has_64,
+                      action='store',
+                      default=def_64 if has_64 else None,
+                      help="Location of 64 bit agent to push")
+  parser.add_argument("fields", nargs="+",
+                      help="fields to check")
+
+  out = parser.parse_args()
+
+  device = adb.device.get_device(out.serial)
+  print("getting root")
+  device.root()
+
+  print("Disabling selinux")
+  device.shell("setenforce 0".split())
+
+  print("Pushing libraries")
+  lib32 = device.shell("mktemp".split())[0].strip()
+  lib64 = device.shell("mktemp".split())[0].strip()
+
+  print(out.lib32 + " -> " + lib32)
+  device.push(out.lib32, lib32)
+
+  print(out.lib64 + " -> " + lib64)
+  device.push(out.lib64, lib64)
+
+  cmd32 = "'{}={}'".format(lib32, ','.join(out.fields))
+  cmd64 = "'{}={}'".format(lib64, ','.join(out.fields))
+
+  if len(out.pid) == 0:
+    print("Getting jdwp pids")
+    new_env = dict(os.environ)
+    new_env["ANDROID_SERIAL"] = device.serial
+    p = subprocess.Popen([device.adb_path, "jdwp"], env=new_env, stdout=subprocess.PIPE)
+    # ADB jdwp doesn't ever exit so just kill it after 1 second to get a list of pids.
+    with concurrent.futures.ProcessPoolExecutor() as ppe:
+      ppe.submit(kill_it, p.pid).result()
+    out.pid = p.communicate()[0].strip().split()
+    p.wait()
+    print(out.pid)
+  print("Clearing logcat")
+  device.shell("logcat -c".split())
+  final = {}
+  print("Getting info from every process dumped to logcat")
+  for p in out.pid:
+    res = check_single_process(p, device, cmd32, cmd64);
+    if res is not None:
+      final[p] = res
+  device.shell('rm {}'.format(lib32).split())
+  device.shell('rm {}'.format(lib64).split())
+  print(json.dumps(final, indent=2))
+
+def kill_it(p):
+  time.sleep(1)
+  os.kill(p, signal.SIGINT)
+
+def check_single_process(pid, device, bit32, bit64):
+  try:
+    # Just try attaching both 32 and 64 bit. Wrong one will fail silently.
+    device.shell(['am', 'attach-agent', str(pid), bit32])
+    device.shell(['am', 'attach-agent', str(pid), bit64])
+    time.sleep(0.5)
+    device.shell('kill -3 {}'.format(pid).split())
+    time.sleep(0.5)
+    out = []
+    all_fields = []
+    lc_cmd = "logcat -d -b main --pid={} -e '^\\t.*\\t[0-9]*\\t[0-9]*$'".format(pid).split(' ')
+    for l in device.shell(lc_cmd)[0].strip().split('\n'):
+      # first 4 are just date and other useless data.
+      data = l.strip().split()[5:]
+      if len(data) < 4:
+        continue
+      # If we run multiple times many copies of the agent will be attached. Just choose one of any
+      # copies for each field.
+      field = data[1]
+      if field not in all_fields:
+        out.append((str(data[0]), str(data[1]), int(data[2]), int(data[3])))
+        all_fields.append(field)
+    if len(out) != 0:
+      print("pid: " + pid + " -> " + str(out))
+      return out
+    else:
+      return None
+  except adb.device.ShellError as e:
+    print("failed on pid " + repr(pid) + " because " + repr(e))
+    return None
+
+if __name__ == '__main__':
+  main()
diff --git a/tools/field-null-percent/fieldnull.cc b/tools/field-null-percent/fieldnull.cc
index 86459d2..8f5b389 100644
--- a/tools/field-null-percent/fieldnull.cc
+++ b/tools/field-null-percent/fieldnull.cc
@@ -147,7 +147,7 @@
   delete list;
 }
 
-static void CreateFieldList(jvmtiEnv* jvmti, JNIEnv* env, std::string args) {
+static void CreateFieldList(jvmtiEnv* jvmti, JNIEnv* env, const std::string& args) {
   RequestList* list = nullptr;
   CHECK_JVMTI(jvmti->Allocate(sizeof(*list), reinterpret_cast<unsigned char**>(&list)));
   new (list) RequestList { .fields_ = GetRequestedFields(env, args), };
@@ -167,7 +167,7 @@
 }
 
 static jint AgentStart(JavaVM* vm, char* options, bool is_onload) {
-  android::base::InitLogging(/* argv */nullptr);
+  android::base::InitLogging(/* argv= */nullptr);
   java_vm = vm;
   jvmtiEnv* jvmti = nullptr;
   if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
@@ -204,14 +204,14 @@
 extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm,
                                                  char* options,
                                                  void* reserved ATTRIBUTE_UNUSED) {
-  return AgentStart(vm, options, /*is_onload*/false);
+  return AgentStart(vm, options, /*is_onload=*/false);
 }
 
 // Early attachment
 extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm,
                                                char* options,
                                                void* reserved ATTRIBUTE_UNUSED) {
-  return AgentStart(jvm, options, /*is_onload*/true);
+  return AgentStart(jvm, options, /*is_onload=*/true);
 }
 
 }  // namespace fieldnull
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 6d9b6fb..2692f68 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -22,21 +22,27 @@
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
+#include "base/bit_utils.h"
+#include "base/hiddenapi_flags.h"
 #include "base/mem_map.h"
 #include "base/os.h"
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/class_accessor-inl.h"
 #include "dex/dex_file-inl.h"
-#include "dex/hidden_api_access_flags.h"
 
 namespace art {
+namespace hiddenapi {
+
+const char kErrorHelp[] = "\nSee go/hiddenapi-error for help.";
 
 static int original_argc;
 static char** original_argv;
 
 static std::string CommandLine() {
   std::vector<std::string> command;
+  command.reserve(original_argc);
   for (int i = 0; i < original_argc; ++i) {
     command.push_back(original_argv[i]);
   }
@@ -66,22 +72,24 @@
   UsageError("Usage: hiddenapi [command_name] [options]...");
   UsageError("");
   UsageError("  Command \"encode\": encode API list membership in boot dex files");
-  UsageError("    --dex=<filename>: dex file which belongs to boot class path,");
-  UsageError("                      the file will be overwritten");
+  UsageError("    --input-dex=<filename>: dex file which belongs to boot class path");
+  UsageError("    --output-dex=<filename>: file to write encoded dex into");
+  UsageError("        input and output dex files are paired in order of appearance");
   UsageError("");
-  UsageError("    --light-greylist=<filename>:");
-  UsageError("    --dark-greylist=<filename>:");
-  UsageError("    --blacklist=<filename>:");
-  UsageError("        text files with signatures of methods/fields to be annotated");
+  UsageError("    --api-flags=<filename>:");
+  UsageError("        CSV file with signatures of methods/fields and their respective flags");
+  UsageError("");
+  UsageError("    --no-force-assign-all:");
+  UsageError("        Disable check that all dex entries have been assigned a flag");
   UsageError("");
   UsageError("  Command \"list\": dump lists of public and private API");
   UsageError("    --boot-dex=<filename>: dex file which belongs to boot class path");
-  UsageError("    --stub-classpath=<filenames>: colon-separated list of dex/apk files");
-  UsageError("        which form API stubs of boot class path. Multiple classpaths can");
-  UsageError("        be specified");
+  UsageError("    --public-stub-classpath=<filenames>:");
+  UsageError("    --core-platform-stub-classpath=<filenames>:");
+  UsageError("        colon-separated list of dex/apk files which form API stubs of boot");
+  UsageError("        classpath. Multiple classpaths can be specified");
   UsageError("");
-  UsageError("    --out-public=<filename>: output file for a list of all public APIs");
-  UsageError("    --out-private=<filename>: output file for a list of all private APIs");
+  UsageError("    --out-api-flags=<filename>: output file for a CSV file with API flags");
   UsageError("");
 
   exit(EXIT_FAILURE);
@@ -108,7 +116,7 @@
 
   std::set<std::string> GetInterfaceDescriptors() const {
     std::set<std::string> list;
-    const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(GetClassDef());
+    const dex::TypeList* ifaces = dex_file_.GetInterfacesList(GetClassDef());
     for (uint32_t i = 0; ifaces != nullptr && i < ifaces->Size(); ++i) {
       list.insert(dex_file_.StringByTypeIdx(ifaces->GetTypeItem(i).type_idx_));
     }
@@ -116,14 +124,27 @@
   }
 
   inline bool IsPublic() const { return HasAccessFlags(kAccPublic); }
+  inline bool IsInterface() const { return HasAccessFlags(kAccInterface); }
 
   inline bool Equals(const DexClass& other) const {
     bool equals = strcmp(GetDescriptor(), other.GetDescriptor()) == 0;
     if (equals) {
       // TODO(dbrazdil): Check that methods/fields match as well once b/111116543 is fixed.
-      CHECK_EQ(GetAccessFlags(), other.GetAccessFlags());
-      CHECK_EQ(GetSuperclassDescriptor(), other.GetSuperclassDescriptor());
-      CHECK(GetInterfaceDescriptors() == other.GetInterfaceDescriptors());
+      CHECK_EQ(GetAccessFlags(), other.GetAccessFlags())
+          << "Inconsistent access flags of class " << GetDescriptor() << ": "
+          << "0x" << std::hex << GetAccessFlags() << std::dec << " (" << dex_file_.GetLocation()
+          << ") and 0x" << std::hex << other.GetAccessFlags() << std::dec << " ("
+          << other.dex_file_.GetLocation() << ")";
+      CHECK_EQ(GetSuperclassDescriptor(), other.GetSuperclassDescriptor())
+          << "Inconsistent superclass of class " << GetDescriptor() << ": "
+          << GetSuperclassDescriptor() << " (" << dex_file_.GetLocation()
+          << ") and " << other.GetSuperclassDescriptor() << " (" << other.dex_file_.GetLocation()
+          << ")";
+      CHECK(GetInterfaceDescriptors() == other.GetInterfaceDescriptors())
+          << "Inconsistent set of interfaces of class " << GetDescriptor() << ": "
+          << JoinStringSet(GetInterfaceDescriptors()) << " (" << dex_file_.GetLocation()
+          << ") and " << JoinStringSet(other.GetInterfaceDescriptors()) << " ("
+          << other.dex_file_.GetLocation() << ")";
     }
     return equals;
   }
@@ -131,6 +152,10 @@
  private:
   uint32_t GetAccessFlags() const { return GetClassDef().access_flags_; }
   bool HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+
+  static std::string JoinStringSet(const std::set<std::string>& s) {
+    return "{" + ::android::base::Join(std::vector<std::string>(s.begin(), s.end()), ",") + "}";
+  }
 };
 
 class DexMember {
@@ -147,30 +172,6 @@
 
   inline const DexClass& GetDeclaringClass() const { return klass_; }
 
-  // Sets hidden bits in access flags and writes them back into the DEX in memory.
-  // Note that this will not update the cached data of the class accessor
-  // until it iterates over this item again and therefore will fail a CHECK if
-  // it is called multiple times on the same DexMember.
-  void SetHidden(HiddenApiAccessFlags::ApiList value) const {
-    const uint32_t old_flags = item_.GetRawAccessFlags();
-    const uint32_t new_flags = HiddenApiAccessFlags::EncodeForDex(old_flags, value);
-    CHECK_EQ(UnsignedLeb128Size(new_flags), UnsignedLeb128Size(old_flags));
-
-    // Locate the LEB128-encoded access flags in class data.
-    // `ptr` initially points to the next ClassData item. We iterate backwards
-    // until we hit the terminating byte of the previous Leb128 value.
-    const uint8_t* ptr = item_.GetDataPointer();
-    if (IsMethod()) {
-      ptr = ReverseSearchUnsignedLeb128(ptr);
-      DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), GetMethod().GetCodeItemOffset());
-    }
-    ptr = ReverseSearchUnsignedLeb128(ptr);
-    DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), old_flags);
-
-    // Overwrite the access flags.
-    UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
-  }
-
   inline bool IsMethod() const { return is_method_; }
   inline bool IsVirtualMethod() const { return IsMethod() && !GetMethod().IsStaticOrDirect(); }
   inline bool IsConstructor() const { return IsMethod() && HasAccessFlags(kAccConstructor); }
@@ -220,12 +221,12 @@
     return down_cast<const ClassAccessor::Method&>(item_);
   }
 
-  inline const DexFile::MethodId& GetMethodId() const {
+  inline const dex::MethodId& GetMethodId() const {
     DCHECK(IsMethod());
     return item_.GetDexFile().GetMethodId(item_.GetIndex());
   }
 
-  inline const DexFile::FieldId& GetFieldId() const {
+  inline const dex::FieldId& GetFieldId() const {
     DCHECK(!IsMethod());
     return item_.GetDexFile().GetFieldId(item_.GetIndex());
   }
@@ -262,6 +263,10 @@
     });
   }
 
+  std::vector<const DexFile*> GetDexFiles() const {
+    return MakeNonOwningPointerVector(dex_files_);
+  }
+
   void UpdateDexChecksums() {
     for (auto& dex_file : dex_files_) {
       // Obtain a writeable pointer to the dex header.
@@ -278,7 +283,7 @@
 
     if (open_writable) {
       for (const std::string& filename : dex_paths) {
-        File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+        File fd(filename.c_str(), O_RDWR, /* check_usage= */ false);
         CHECK_NE(fd.Fd(), -1) << "Unable to open file '" << filename << "': " << strerror(errno);
 
         // Memory-map the dex file with MAP_SHARED flag so that changes in memory
@@ -288,10 +293,10 @@
         // We do those checks here and skip them when loading the processed file
         // into boot class path.
         std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
-                                                                   /* location */ filename,
-                                                                   /* verify */ true,
-                                                                   /* verify_checksum */ true,
-                                                                   /* mmap_shared */ true,
+                                                                   /* location= */ filename,
+                                                                   /* verify= */ true,
+                                                                   /* verify_checksum= */ true,
+                                                                   /* mmap_shared= */ true,
                                                                    &error_msg));
         CHECK(dex_file.get() != nullptr) << "Open failed for '" << filename << "' " << error_msg;
         CHECK(dex_file->IsStandardDexFile()) << "Expected a standard dex file '" << filename << "'";
@@ -302,9 +307,9 @@
     } else {
       for (const std::string& filename : dex_paths) {
         bool success = dex_loader.Open(filename.c_str(),
-                                       /* location */ filename,
-                                       /* verify */ true,
-                                       /* verify_checksum */ true,
+                                       /* location= */ filename,
+                                       /* verify= */ true,
+                                       /* verify_checksum= */ true,
                                        &error_msg,
                                        &dex_files_);
         CHECK(success) << "Open failed for '" << filename << "' " << error_msg;
@@ -340,13 +345,13 @@
   // See comment on Hierarchy::ForEachResolvableMember.
   template<typename Fn>
   bool ForEachResolvableMember(const DexMember& other, Fn fn) {
-    return ForEachResolvableMember_Impl(other, fn) != ResolutionResult::kNotFound;
+    std::vector<HierarchyClass*> visited;
+    return ForEachResolvableMember_Impl(other, fn, true, true, visited);
   }
 
   // Returns true if this class contains at least one member matching `other`.
   bool HasMatchingMember(const DexMember& other) {
-    return ForEachMatchingMember(
-        other, [](const DexMember&) { return true; }) != ResolutionResult::kNotFound;
+    return ForEachMatchingMember(other, [](const DexMember&) { return true; });
   }
 
   // Recursively iterates over all subclasses of this class and invokes `fn`
@@ -362,62 +367,60 @@
   }
 
  private:
-  // Result of resolution which takes into account whether the member was found
-  // for the first time or not. This is just a performance optimization to prevent
-  // re-visiting previously visited members.
-  // Note that order matters. When accumulating results, we always pick the maximum.
-  enum class ResolutionResult {
-    kNotFound,
-    kFoundOld,
-    kFoundNew,
-  };
-
-  inline ResolutionResult Accumulate(ResolutionResult a, ResolutionResult b) {
-    return static_cast<ResolutionResult>(
-        std::max(static_cast<unsigned>(a), static_cast<unsigned>(b)));
-  }
-
   template<typename Fn>
-  ResolutionResult ForEachResolvableMember_Impl(const DexMember& other, Fn fn) {
-    // First try to find a member matching `other` in this class.
-    ResolutionResult foundInClass = ForEachMatchingMember(other, fn);
-
-    switch (foundInClass) {
-      case ResolutionResult::kFoundOld:
-        // A matching member was found and previously explored. All subclasses
-        // must have been explored too.
-        break;
-
-      case ResolutionResult::kFoundNew:
-        // A matching member was found and this was the first time it was visited.
-        // If it is a virtual method, visit all methods overriding/implementing it too.
-        if (other.IsVirtualMethod()) {
-          for (HierarchyClass* subclass : extended_by_) {
-            subclass->ForEachOverridingMember(other, fn);
-          }
-        }
-        break;
-
-      case ResolutionResult::kNotFound:
-        // A matching member was not found in this class. Explore the superclasses
-        // and implemented interfaces.
-        for (HierarchyClass* superclass : extends_) {
-          foundInClass = Accumulate(
-              foundInClass, superclass->ForEachResolvableMember_Impl(other, fn));
-        }
-        break;
+  bool ForEachResolvableMember_Impl(const DexMember& other,
+                                    Fn fn,
+                                    bool allow_explore_up,
+                                    bool allow_explore_down,
+                                    std::vector<HierarchyClass*> visited) {
+    if (std::find(visited.begin(), visited.end(), this) == visited.end()) {
+      visited.push_back(this);
+    } else {
+      return false;
     }
 
-    return foundInClass;
+    // First try to find a member matching `other` in this class.
+    bool found = ForEachMatchingMember(other, fn);
+
+    // If not found, see if it is inherited from parents. Note that this will not
+    // revisit parents already in `visited`.
+    if (!found && allow_explore_up) {
+      for (HierarchyClass* superclass : extends_) {
+        found |= superclass->ForEachResolvableMember_Impl(
+            other,
+            fn,
+            /* allow_explore_up= */ true,
+            /* allow_explore_down= */ false,
+            visited);
+      }
+    }
+
+    // If this is a virtual method, continue exploring into subclasses so as to visit
+    // all overriding methods. Allow subclasses to explore their superclasses if this
+    // is an interface. This is needed to find implementations of this interface's
+    // methods inherited from superclasses (b/122551864).
+    if (allow_explore_down && other.IsVirtualMethod()) {
+      for (HierarchyClass* subclass : extended_by_) {
+        subclass->ForEachResolvableMember_Impl(
+            other,
+            fn,
+            /* allow_explore_up= */ GetOneDexClass().IsInterface(),
+            /* allow_explore_down= */ true,
+            visited);
+      }
+    }
+
+    return found;
   }
 
   template<typename Fn>
-  ResolutionResult ForEachMatchingMember(const DexMember& other, Fn fn) {
-    ResolutionResult found = ResolutionResult::kNotFound;
+  bool ForEachMatchingMember(const DexMember& other, Fn fn) {
+    bool found = false;
     auto compare_member = [&](const DexMember& member) {
+      // TODO(dbrazdil): Check whether class of `other` can access `member`.
       if (member == other) {
-        found = Accumulate(found, fn(member) ? ResolutionResult::kFoundNew
-                                             : ResolutionResult::kFoundOld);
+        found = true;
+        fn(member);
       }
     };
     for (const DexClass& dex_class : dex_classes_) {
@@ -431,20 +434,6 @@
     return found;
   }
 
-  template<typename Fn>
-  void ForEachOverridingMember(const DexMember& other, Fn fn) {
-    CHECK(other.IsVirtualMethod());
-    ResolutionResult found = ForEachMatchingMember(other, fn);
-    if (found == ResolutionResult::kFoundOld) {
-      // No need to explore further.
-      return;
-    } else {
-      for (HierarchyClass* subclass : extended_by_) {
-        subclass->ForEachOverridingMember(other, fn);
-      }
-    }
-  }
-
   // DexClass entries of this class found across all the provided dex files.
   std::vector<DexClass> dex_classes_;
 
@@ -559,9 +548,321 @@
   std::map<std::string, HierarchyClass> classes_;
 };
 
+// Builder of dex section containing hiddenapi flags.
+class HiddenapiClassDataBuilder final {
+ public:
+  explicit HiddenapiClassDataBuilder(const DexFile& dex_file)
+      : num_classdefs_(dex_file.NumClassDefs()),
+        next_class_def_idx_(0u),
+        class_def_has_non_zero_flags_(false),
+        dex_file_has_non_zero_flags_(false),
+        data_(sizeof(uint32_t) * (num_classdefs_ + 1), 0u) {
+    *GetSizeField() = GetCurrentDataSize();
+  }
+
+  // Notify the builder that new flags for the next class def
+  // will be written now. The builder records the current offset
+  // into the header.
+  void BeginClassDef(uint32_t idx) {
+    CHECK_EQ(next_class_def_idx_, idx);
+    CHECK_LT(idx, num_classdefs_);
+    GetOffsetArray()[idx] = GetCurrentDataSize();
+    class_def_has_non_zero_flags_ = false;
+  }
+
+  // Notify the builder that all flags for this class def have been
+  // written. The builder updates the total size of the data struct
+  // and may set offset for class def in header to zero if no data
+  // has been written.
+  void EndClassDef(uint32_t idx) {
+    CHECK_EQ(next_class_def_idx_, idx);
+    CHECK_LT(idx, num_classdefs_);
+
+    ++next_class_def_idx_;
+
+    if (!class_def_has_non_zero_flags_) {
+      // No need to store flags for this class. Remove the written flags
+      // and set offset in header to zero.
+      data_.resize(GetOffsetArray()[idx]);
+      GetOffsetArray()[idx] = 0u;
+    }
+
+    dex_file_has_non_zero_flags_ |= class_def_has_non_zero_flags_;
+
+    if (idx == num_classdefs_ - 1) {
+      if (dex_file_has_non_zero_flags_) {
+        // This was the last class def and we have generated non-zero hiddenapi
+        // flags. Update total size in the header.
+        *GetSizeField() = GetCurrentDataSize();
+      } else {
+        // This was the last class def and we have not generated any non-zero
+        // hiddenapi flags. Clear all the data.
+        data_.clear();
+      }
+    }
+  }
+
+  // Append flags at the end of the data struct. This should be called
+  // between BeginClassDef and EndClassDef in the order of appearance of
+  // fields/methods in the class data stream.
+  void WriteFlags(const ApiList& flags) {
+    uint32_t dex_flags = flags.GetDexFlags();
+    EncodeUnsignedLeb128(&data_, dex_flags);
+    class_def_has_non_zero_flags_ |= (dex_flags != 0u);
+  }
+
+  // Return backing data, assuming that all flags have been written.
+  const std::vector<uint8_t>& GetData() const {
+    CHECK_EQ(next_class_def_idx_, num_classdefs_) << "Incomplete data";
+    return data_;
+  }
+
+ private:
+  // Returns pointer to the size field in the header of this dex section.
+  uint32_t* GetSizeField() {
+    // Assume malloc() aligns allocated memory to at least uint32_t.
+    CHECK(IsAligned<sizeof(uint32_t)>(data_.data()));
+    return reinterpret_cast<uint32_t*>(data_.data());
+  }
+
+  // Returns pointer to array of offsets (indexed by class def indices) in the
+  // header of this dex section.
+  uint32_t* GetOffsetArray() { return &GetSizeField()[1]; }
+  uint32_t GetCurrentDataSize() const { return data_.size(); }
+
+  // Number of class defs in this dex file.
+  const uint32_t num_classdefs_;
+
+  // Next expected class def index.
+  uint32_t next_class_def_idx_;
+
+  // Whether non-zero flags have been encountered for this class def.
+  bool class_def_has_non_zero_flags_;
+
+  // Whether any non-zero flags have been encountered for this dex file.
+  bool dex_file_has_non_zero_flags_;
+
+  // Vector containing the data of the built data structure.
+  std::vector<uint8_t> data_;
+};
+
+// Edits a dex file, inserting a new HiddenapiClassData section.
+class DexFileEditor final {
+ public:
+  DexFileEditor(const DexFile& old_dex, const std::vector<uint8_t>& hiddenapi_class_data)
+      : old_dex_(old_dex),
+        hiddenapi_class_data_(hiddenapi_class_data),
+        loaded_dex_header_(nullptr),
+        loaded_dex_maplist_(nullptr) {}
+
+  // Copies dex file into a backing data vector, appends the given HiddenapiClassData
+  // and updates the MapList.
+  void Encode() {
+    // We do not support non-standard dex encodings, e.g. compact dex.
+    CHECK(old_dex_.IsStandardDexFile());
+
+    // If there are no data to append, copy the old dex file and return.
+    if (hiddenapi_class_data_.empty()) {
+      AllocateMemory(old_dex_.Size());
+      Append(old_dex_.Begin(), old_dex_.Size(), /* update_header= */ false);
+      return;
+    }
+
+    // Find the old MapList, find its size.
+    const dex::MapList* old_map = old_dex_.GetMapList();
+    CHECK_LT(old_map->size_, std::numeric_limits<uint32_t>::max());
+
+    // Compute the size of the new dex file. We append the HiddenapiClassData,
+    // one MapItem and possibly some padding to align the new MapList.
+    CHECK(IsAligned<kMapListAlignment>(old_dex_.Size()))
+        << "End of input dex file is not 4-byte aligned, possibly because its MapList is not "
+        << "at the end of the file.";
+    size_t size_delta =
+        RoundUp(hiddenapi_class_data_.size(), kMapListAlignment) + sizeof(dex::MapItem);
+    size_t new_size = old_dex_.Size() + size_delta;
+    AllocateMemory(new_size);
+
+    // Copy the old dex file into the backing data vector. Load the copied
+    // dex file to obtain pointers to its header and MapList.
+    Append(old_dex_.Begin(), old_dex_.Size(), /* update_header= */ false);
+    ReloadDex(/* verify= */ false);
+
+    // Truncate the new dex file before the old MapList. This assumes that
+    // the MapList is the last entry in the dex file. This is currently true
+    // for our tooling.
+    // TODO: Implement the general case by zero-ing the old MapList (turning
+    // it into padding).
+    RemoveOldMapList();
+
+    // Append HiddenapiClassData.
+    size_t payload_offset = AppendHiddenapiClassData();
+
+    // Write new MapList with an entry for HiddenapiClassData.
+    CreateMapListWithNewItem(payload_offset);
+
+    // Check that the pre-computed size matches the actual size.
+    CHECK_EQ(offset_, new_size);
+
+    // Reload to all data structures.
+    ReloadDex(/* verify= */ false);
+
+    // Update the dex checksum.
+    UpdateChecksum();
+
+    // Run DexFileVerifier on the new dex file as a CHECK.
+    ReloadDex(/* verify= */ true);
+  }
+
+  // Writes the edited dex file into a file.
+  void WriteTo(const std::string& path) {
+    CHECK(!data_.empty());
+    std::ofstream ofs(path.c_str(), std::ofstream::out | std::ofstream::binary);
+    ofs.write(reinterpret_cast<const char*>(data_.data()), data_.size());
+    ofs.flush();
+    CHECK(ofs.good());
+    ofs.close();
+  }
+
+ private:
+  static constexpr size_t kMapListAlignment = 4u;
+  static constexpr size_t kHiddenapiClassDataAlignment = 4u;
+
+  void ReloadDex(bool verify) {
+    std::string error_msg;
+    DexFileLoader loader;
+    loaded_dex_ = loader.Open(
+        data_.data(),
+        data_.size(),
+        "test_location",
+        old_dex_.GetLocationChecksum(),
+        /* oat_dex_file= */ nullptr,
+        /* verify= */ verify,
+        /* verify_checksum= */ verify,
+        &error_msg);
+    if (loaded_dex_.get() == nullptr) {
+      LOG(FATAL) << "Failed to load edited dex file: " << error_msg;
+      UNREACHABLE();
+    }
+
+    // Load the location of header and map list before we start editing the file.
+    loaded_dex_header_ = const_cast<DexFile::Header*>(&loaded_dex_->GetHeader());
+    loaded_dex_maplist_ = const_cast<dex::MapList*>(loaded_dex_->GetMapList());
+  }
+
+  DexFile::Header& GetHeader() const {
+    CHECK(loaded_dex_header_ != nullptr);
+    return *loaded_dex_header_;
+  }
+
+  dex::MapList& GetMapList() const {
+    CHECK(loaded_dex_maplist_ != nullptr);
+    return *loaded_dex_maplist_;
+  }
+
+  void AllocateMemory(size_t total_size) {
+    data_.clear();
+    data_.resize(total_size);
+    CHECK(IsAligned<kMapListAlignment>(data_.data()));
+    CHECK(IsAligned<kHiddenapiClassDataAlignment>(data_.data()));
+    offset_ = 0;
+  }
+
+  uint8_t* GetCurrentDataPtr() {
+    return data_.data() + offset_;
+  }
+
+  void UpdateDataSize(off_t delta, bool update_header) {
+    offset_ += delta;
+    if (update_header) {
+      DexFile::Header& header = GetHeader();
+      header.file_size_ += delta;
+      header.data_size_ += delta;
+    }
+  }
+
+  template<typename T>
+  T* Append(const T* src, size_t len, bool update_header = true) {
+    CHECK_LE(offset_ + len, data_.size());
+    uint8_t* dst = GetCurrentDataPtr();
+    memcpy(dst, src, len);
+    UpdateDataSize(len, update_header);
+    return reinterpret_cast<T*>(dst);
+  }
+
+  void InsertPadding(size_t alignment) {
+    size_t len = RoundUp(offset_, alignment) - offset_;
+    std::vector<uint8_t> padding(len, 0);
+    Append(padding.data(), padding.size());
+  }
+
+  void RemoveOldMapList() {
+    size_t map_size = GetMapList().Size();
+    uint8_t* map_start = reinterpret_cast<uint8_t*>(&GetMapList());
+    CHECK_EQ(map_start + map_size, GetCurrentDataPtr()) << "MapList not at the end of dex file";
+    UpdateDataSize(-static_cast<off_t>(map_size), /* update_header= */ true);
+    CHECK_EQ(map_start, GetCurrentDataPtr());
+    loaded_dex_maplist_ = nullptr;  // do not use this map list any more
+  }
+
+  void CreateMapListWithNewItem(size_t payload_offset) {
+    InsertPadding(/* alignment= */ kMapListAlignment);
+
+    size_t new_map_offset = offset_;
+    dex::MapList* map = Append(old_dex_.GetMapList(), old_dex_.GetMapList()->Size());
+
+    // Check last map entry is a pointer to itself.
+    dex::MapItem& old_item = map->list_[map->size_ - 1];
+    CHECK(old_item.type_ == DexFile::kDexTypeMapList);
+    CHECK_EQ(old_item.size_, 1u);
+    CHECK_EQ(old_item.offset_, GetHeader().map_off_);
+
+    // Create a new MapItem entry with new MapList details.
+    dex::MapItem new_item;
+    new_item.type_ = old_item.type_;
+    new_item.unused_ = 0u;  // initialize to ensure dex output is deterministic (b/119308882)
+    new_item.size_ = old_item.size_;
+    new_item.offset_ = new_map_offset;
+
+    // Update pointer in the header.
+    GetHeader().map_off_ = new_map_offset;
+
+    // Append a new MapItem and return its pointer.
+    map->size_++;
+    Append(&new_item, sizeof(dex::MapItem));
+
+    // Change penultimate entry to point to metadata.
+    old_item.type_ = DexFile::kDexTypeHiddenapiClassData;
+    old_item.size_ = 1u;  // there is only one section
+    old_item.offset_ = payload_offset;
+  }
+
+  size_t AppendHiddenapiClassData() {
+    size_t payload_offset = offset_;
+    CHECK_EQ(kMapListAlignment, kHiddenapiClassDataAlignment);
+    CHECK(IsAligned<kHiddenapiClassDataAlignment>(payload_offset))
+        << "Should not need to align the section, previous data was already aligned";
+    Append(hiddenapi_class_data_.data(), hiddenapi_class_data_.size());
+    return payload_offset;
+  }
+
+  void UpdateChecksum() {
+    GetHeader().checksum_ = loaded_dex_->CalculateChecksum();
+  }
+
+  const DexFile& old_dex_;
+  const std::vector<uint8_t>& hiddenapi_class_data_;
+
+  std::vector<uint8_t> data_;
+  size_t offset_;
+
+  std::unique_ptr<const DexFile> loaded_dex_;
+  DexFile::Header* loaded_dex_header_;
+  dex::MapList* loaded_dex_maplist_;
+};
+
 class HiddenApi final {
  public:
-  HiddenApi() {}
+  HiddenApi() : force_assign_all_(true) {}
 
   void Run(int argc, char** argv) {
     switch (ParseArgs(argc, argv)) {
@@ -590,14 +891,14 @@
       if (command == "encode") {
         for (int i = 1; i < argc; ++i) {
           const StringPiece option(argv[i]);
-          if (option.starts_with("--dex=")) {
-            boot_dex_paths_.push_back(option.substr(strlen("--dex=")).ToString());
-          } else if (option.starts_with("--light-greylist=")) {
-            light_greylist_path_ = option.substr(strlen("--light-greylist=")).ToString();
-          } else if (option.starts_with("--dark-greylist=")) {
-            dark_greylist_path_ = option.substr(strlen("--dark-greylist=")).ToString();
-          } else if (option.starts_with("--blacklist=")) {
-            blacklist_path_ = option.substr(strlen("--blacklist=")).ToString();
+          if (option.starts_with("--input-dex=")) {
+            boot_dex_paths_.push_back(option.substr(strlen("--input-dex=")).ToString());
+          } else if (option.starts_with("--output-dex=")) {
+            output_dex_paths_.push_back(option.substr(strlen("--output-dex=")).ToString());
+          } else if (option.starts_with("--api-flags=")) {
+            api_flags_path_ = option.substr(strlen("--api-flags=")).ToString();
+          } else if (option == "--no-force-assign-all") {
+            force_assign_all_ = false;
           } else {
             Usage("Unknown argument '%s'", option.data());
           }
@@ -608,13 +909,16 @@
           const StringPiece option(argv[i]);
           if (option.starts_with("--boot-dex=")) {
             boot_dex_paths_.push_back(option.substr(strlen("--boot-dex=")).ToString());
-          } else if (option.starts_with("--stub-classpath=")) {
-            stub_classpaths_.push_back(android::base::Split(
-                option.substr(strlen("--stub-classpath=")).ToString(), ":"));
-          } else if (option.starts_with("--out-public=")) {
-            out_public_path_ = option.substr(strlen("--out-public=")).ToString();
-          } else if (option.starts_with("--out-private=")) {
-            out_private_path_ = option.substr(strlen("--out-private=")).ToString();
+          } else if (option.starts_with("--public-stub-classpath=")) {
+            stub_classpaths_.push_back(std::make_pair(
+                option.substr(strlen("--public-stub-classpath=")).ToString(),
+                ApiList::Whitelist()));
+          } else if (option.starts_with("--core-platform-stub-classpath=")) {
+            stub_classpaths_.push_back(std::make_pair(
+                option.substr(strlen("--core-platform-stub-classpath=")).ToString(),
+                ApiList::CorePlatformApi()));
+          } else if (option.starts_with("--out-api-flags=")) {
+            api_flags_path_ = option.substr(strlen("--out-api-flags=")).ToString();
           } else {
             Usage("Unknown argument '%s'", option.data());
           }
@@ -630,43 +934,81 @@
 
   void EncodeAccessFlags() {
     if (boot_dex_paths_.empty()) {
-      Usage("No boot DEX files specified");
+      Usage("No input DEX files specified");
+    } else if (output_dex_paths_.size() != boot_dex_paths_.size()) {
+      Usage("Number of input DEX files does not match number of output DEX files");
     }
 
     // Load dex signatures.
-    std::map<std::string, HiddenApiAccessFlags::ApiList> api_list;
-    OpenApiFile(light_greylist_path_, api_list, HiddenApiAccessFlags::kLightGreylist);
-    OpenApiFile(dark_greylist_path_, api_list, HiddenApiAccessFlags::kDarkGreylist);
-    OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
+    std::map<std::string, ApiList> api_list = OpenApiFile(api_flags_path_);
 
-    // Open all dex files.
-    ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ true);
+    // Iterate over input dex files and insert HiddenapiClassData sections.
+    for (size_t i = 0; i < boot_dex_paths_.size(); ++i) {
+      const std::string& input_path = boot_dex_paths_[i];
+      const std::string& output_path = output_dex_paths_[i];
 
-    // Set access flags of all members.
-    boot_classpath.ForEachDexMember([&api_list](const DexMember& boot_member) {
-      auto it = api_list.find(boot_member.GetApiEntry());
-      boot_member.SetHidden(it == api_list.end() ? HiddenApiAccessFlags::kWhitelist : it->second);
-    });
+      ClassPath boot_classpath({ input_path }, /* open_writable= */ false);
+      std::vector<const DexFile*> input_dex_files = boot_classpath.GetDexFiles();
+      CHECK_EQ(input_dex_files.size(), 1u);
+      const DexFile& input_dex = *input_dex_files[0];
 
-    boot_classpath.UpdateDexChecksums();
+      HiddenapiClassDataBuilder builder(input_dex);
+      boot_classpath.ForEachDexClass([&](const DexClass& boot_class) {
+        builder.BeginClassDef(boot_class.GetClassDefIndex());
+        if (boot_class.GetData() != nullptr) {
+          auto fn_shared = [&](const DexMember& boot_member) {
+            auto it = api_list.find(boot_member.GetApiEntry());
+            bool api_list_found = (it != api_list.end());
+            CHECK(!force_assign_all_ || api_list_found)
+                << "Could not find hiddenapi flags for dex entry: " << boot_member.GetApiEntry();
+            builder.WriteFlags(api_list_found ? it->second : ApiList::Whitelist());
+          };
+          auto fn_field = [&](const ClassAccessor::Field& boot_field) {
+            fn_shared(DexMember(boot_class, boot_field));
+          };
+          auto fn_method = [&](const ClassAccessor::Method& boot_method) {
+            fn_shared(DexMember(boot_class, boot_method));
+          };
+          boot_class.VisitFieldsAndMethods(fn_field, fn_field, fn_method, fn_method);
+        }
+        builder.EndClassDef(boot_class.GetClassDefIndex());
+      });
+
+      DexFileEditor dex_editor(input_dex, builder.GetData());
+      dex_editor.Encode();
+      dex_editor.WriteTo(output_path);
+    }
   }
 
-  void OpenApiFile(const std::string& path,
-                   std::map<std::string, HiddenApiAccessFlags::ApiList>& api_list,
-                   HiddenApiAccessFlags::ApiList membership) {
-    if (path.empty()) {
-      return;
-    }
-
+  std::map<std::string, ApiList> OpenApiFile(const std::string& path) {
+    CHECK(!path.empty());
     std::ifstream api_file(path, std::ifstream::in);
     CHECK(!api_file.fail()) << "Unable to open file '" << path << "' " << strerror(errno);
 
-    for (std::string line; std::getline(api_file, line);) {
-      CHECK(api_list.find(line) == api_list.end())
-          << "Duplicate entry: " << line << " (" << api_list[line] << " and " << membership << ")";
-      api_list.emplace(line, membership);
+    std::map<std::string, ApiList> api_flag_map;
+
+    size_t line_number = 1;
+    for (std::string line; std::getline(api_file, line); line_number++) {
+      std::vector<std::string> values = android::base::Split(line, ",");
+      CHECK_GT(values.size(), 1u) << path << ":" << line_number
+          << ": No flags found: " << line << kErrorHelp;
+
+      const std::string& signature = values[0];
+      CHECK(api_flag_map.find(signature) == api_flag_map.end()) << path << ":" << line_number
+          << ": Duplicate entry: " << signature << kErrorHelp;
+
+      ApiList membership;
+      bool success = ApiList::FromNames(values.begin() + 1, values.end(), &membership);
+      CHECK(success) << path << ":" << line_number
+          << ": Some flags were not recognized: " << line << kErrorHelp;
+      CHECK(membership.IsValid()) << path << ":" << line_number
+          << ": Invalid combination of flags: " << line << kErrorHelp;
+
+      api_flag_map.emplace(signature, membership);
     }
+
     api_file.close();
+    return api_flag_map;
   }
 
   void ListApi() {
@@ -674,51 +1016,46 @@
       Usage("No boot DEX files specified");
     } else if (stub_classpaths_.empty()) {
       Usage("No stub DEX files specified");
-    } else if (out_public_path_.empty()) {
-      Usage("No public API output path specified");
-    } else if (out_private_path_.empty()) {
-      Usage("No private API output path specified");
+    } else if (api_flags_path_.empty()) {
+      Usage("No output path specified");
     }
 
     // Complete list of boot class path members. The associated boolean states
     // whether it is public (true) or private (false).
-    std::map<std::string, bool> boot_members;
+    std::map<std::string, ApiList> boot_members;
 
     // Deduplicate errors before printing them.
     std::set<std::string> unresolved;
 
     // Open all dex files.
-    ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ false);
+    ClassPath boot_classpath(boot_dex_paths_, /* open_writable= */ false);
     Hierarchy boot_hierarchy(boot_classpath);
 
     // Mark all boot dex members private.
-    boot_classpath.ForEachDexMember([&boot_members](const DexMember& boot_member) {
-      boot_members[boot_member.GetApiEntry()] = false;
+    boot_classpath.ForEachDexMember([&](const DexMember& boot_member) {
+      boot_members[boot_member.GetApiEntry()] = ApiList();
     });
 
     // Resolve each SDK dex member against the framework and mark it white.
-    for (const std::vector<std::string>& stub_classpath_dex : stub_classpaths_) {
-      ClassPath stub_classpath(stub_classpath_dex, /* open_writable */ false);
+    for (const auto& cp_entry : stub_classpaths_) {
+      ClassPath stub_classpath(android::base::Split(cp_entry.first, ":"),
+                               /* open_writable= */ false);
       Hierarchy stub_hierarchy(stub_classpath);
+      const ApiList stub_api_list = cp_entry.second;
+
       stub_classpath.ForEachDexMember(
-          [&stub_hierarchy, &boot_hierarchy, &boot_members, &unresolved](
-              const DexMember& stub_member) {
+          [&](const DexMember& stub_member) {
             if (!stub_hierarchy.IsMemberVisible(stub_member)) {
               // Typically fake constructors and inner-class `this` fields.
               return;
             }
             bool resolved = boot_hierarchy.ForEachResolvableMember(
                 stub_member,
-                [&boot_members](const DexMember& boot_member) {
+                [&](const DexMember& boot_member) {
                   std::string entry = boot_member.GetApiEntry();
                   auto it = boot_members.find(entry);
                   CHECK(it != boot_members.end());
-                  if (it->second) {
-                    return false;  // has been marked before
-                  } else {
-                    it->second = true;
-                    return true;  // marked for the first time
-                  }
+                  it->second |= stub_api_list;
                 });
             if (!resolved) {
               unresolved.insert(stub_member.GetApiEntry());
@@ -732,41 +1069,44 @@
     }
 
     // Write into public/private API files.
-    std::ofstream file_public(out_public_path_.c_str());
-    std::ofstream file_private(out_private_path_.c_str());
-    for (const std::pair<std::string, bool> entry : boot_members) {
-      if (entry.second) {
-        file_public << entry.first << std::endl;
+    std::ofstream file_flags(api_flags_path_.c_str());
+    for (const auto& entry : boot_members) {
+      if (entry.second.IsEmpty()) {
+        file_flags << entry.first << std::endl;
       } else {
-        file_private << entry.first << std::endl;
+        file_flags << entry.first << "," << entry.second << std::endl;
       }
     }
-    file_public.close();
-    file_private.close();
+    file_flags.close();
   }
 
+  // Whether to check that all dex entries have been assigned flags.
+  // Defaults to true.
+  bool force_assign_all_;
+
   // Paths to DEX files which should be processed.
   std::vector<std::string> boot_dex_paths_;
 
+  // Output paths where modified DEX files should be written.
+  std::vector<std::string> output_dex_paths_;
+
   // Set of public API stub classpaths. Each classpath is formed by a list
   // of DEX/APK files in the order they appear on the classpath.
-  std::vector<std::vector<std::string>> stub_classpaths_;
+  std::vector<std::pair<std::string, ApiList>> stub_classpaths_;
 
-  // Paths to text files which contain the lists of API members.
-  std::string light_greylist_path_;
-  std::string dark_greylist_path_;
-  std::string blacklist_path_;
-
-  // Paths to text files to which we will output list of all API members.
-  std::string out_public_path_;
-  std::string out_private_path_;
+  // Path to CSV file containing the list of API members and their flags.
+  // This could be both an input and output path.
+  std::string api_flags_path_;
 };
 
+}  // namespace hiddenapi
 }  // namespace art
 
 int main(int argc, char** argv) {
+  art::hiddenapi::original_argc = argc;
+  art::hiddenapi::original_argv = argv;
   android::base::InitLogging(argv);
   art::MemMap::Init();
-  art::HiddenApi().Run(argc, argv);
+  art::hiddenapi::HiddenApi().Run(argc, argv);
   return EXIT_SUCCESS;
 }
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index b50f684..74feb8a 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -16,6 +16,8 @@
 
 #include <fstream>
 
+#include "android-base/strings.h"
+
 #include "base/unix_file/fd_file.h"
 #include "base/zip_archive.h"
 #include "common_runtime_test.h"
@@ -41,12 +43,11 @@
     return file_path;
   }
 
-  std::unique_ptr<const DexFile> RunHiddenApi(const ScratchFile& light_greylist,
-                                              const ScratchFile& dark_greylist,
-                                              const ScratchFile& blacklist,
-                                              const std::vector<std::string>& extra_args,
-                                              ScratchFile* out_dex) {
+  std::unique_ptr<const DexFile> RunHiddenapiEncode(const ScratchFile& flags_csv,
+                                                    const std::vector<std::string>& extra_args,
+                                                    const ScratchFile& out_dex) {
     std::string error;
+    ScratchFile in_dex;
     std::unique_ptr<ZipArchive> jar(
         ZipArchive::Open(GetTestDexFileName("HiddenApi").c_str(), &error));
     if (jar == nullptr) {
@@ -58,7 +59,7 @@
       LOG(FATAL) << "Could not find classes.dex in test file " << GetTestDexFileName("HiddenApi")
                  << ": " << error;
       UNREACHABLE();
-    } else if (!jar_classes_dex->ExtractToFile(*out_dex->GetFile(), &error)) {
+    } else if (!jar_classes_dex->ExtractToFile(*in_dex.GetFile(), &error)) {
       LOG(FATAL) << "Could not extract classes.dex from test file "
                  << GetTestDexFileName("HiddenApi") << ": " << error;
       UNREACHABLE();
@@ -68,32 +69,56 @@
     argv_str.push_back(GetHiddenApiCmd());
     argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
     argv_str.push_back("encode");
-    argv_str.push_back("--dex=" + out_dex->GetFilename());
-    argv_str.push_back("--light-greylist=" + light_greylist.GetFilename());
-    argv_str.push_back("--dark-greylist=" + dark_greylist.GetFilename());
-    argv_str.push_back("--blacklist=" + blacklist.GetFilename());
+    argv_str.push_back("--input-dex=" + in_dex.GetFilename());
+    argv_str.push_back("--output-dex=" + out_dex.GetFilename());
+    argv_str.push_back("--api-flags=" + flags_csv.GetFilename());
+    argv_str.push_back("--no-force-assign-all");
     int return_code = ExecAndReturnCode(argv_str, &error);
     if (return_code == 0) {
-      return OpenDex(*out_dex);
+      return OpenDex(out_dex);
     } else {
       LOG(ERROR) << "HiddenApi binary exited with unexpected return code " << return_code;
       return nullptr;
     }
   }
 
+  bool RunHiddenapiList(const ScratchFile& out_flags_csv) {
+    std::string error;
+    std::string boot_jar = GetTestDexFileName("HiddenApi");
+    std::string stub_jar = GetTestDexFileName("HiddenApiStubs");
+    std::string boot_cp = android::base::Join(GetLibCoreDexFileNames(), ":");
+
+    std::vector<std::string> argv_str;
+    argv_str.push_back(GetHiddenApiCmd());
+    argv_str.push_back("list");
+    for (const std::string& core_jar : GetLibCoreDexFileNames()) {
+      argv_str.push_back("--boot-dex=" + core_jar);
+    }
+    argv_str.push_back("--boot-dex=" + boot_jar);
+    argv_str.push_back("--public-stub-classpath=" + boot_cp + ":" + stub_jar);
+    argv_str.push_back("--out-api-flags=" + out_flags_csv.GetFilename());
+    int return_code = ExecAndReturnCode(argv_str, &error);
+    if (return_code == 0) {
+      return true;
+    } else {
+      LOG(ERROR) << "HiddenApi binary exited with unexpected return code " << return_code;
+      return false;
+    }
+  }
+
   std::unique_ptr<const DexFile> OpenDex(const ScratchFile& file) {
     ArtDexFileLoader dex_loader;
     std::string error_msg;
 
-    File fd(file.GetFilename(), O_RDONLY, /* check_usage */ false);
+    File fd(file.GetFilename(), O_RDONLY, /* check_usage= */ false);
     if (fd.Fd() == -1) {
       LOG(FATAL) << "Unable to open file '" << file.GetFilename() << "': " << strerror(errno);
       UNREACHABLE();
     }
 
     std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(
-        fd.Release(), /* location */ file.GetFilename(), /* verify */ false,
-        /* verify_checksum */ true, /* mmap_shared */ false, &error_msg));
+        fd.Release(), /* location= */ file.GetFilename(), /* verify= */ true,
+        /* verify_checksum= */ true, /* mmap_shared= */ false, &error_msg));
     if (dex_file.get() == nullptr) {
       LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << error_msg;
       UNREACHABLE();
@@ -114,28 +139,57 @@
     return ofs;
   }
 
-  const DexFile::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
-    const DexFile::TypeId* type_id = dex_file.FindTypeId(desc);
+  std::map<std::string, std::string> ReadFlagsCsvFile(const ScratchFile& file) {
+    std::ifstream ifs(file.GetFilename());
+    std::map<std::string, std::string> flags;
+
+    for (std::string line; std::getline(ifs, line);) {
+      std::size_t comma = line.find(",");
+      if (comma == std::string::npos) {
+        flags.emplace(line, "");
+      } else {
+        flags.emplace(line.substr(0, comma), line.substr(comma + 1));
+      }
+    }
+
+    return flags;
+  }
+
+  std::string SafeMapGet(const std::string& key, const std::map<std::string, std::string>& map) {
+    auto it = map.find(key);
+    if (it == map.end()) {
+      LOG(FATAL) << "Key not found: " << key;
+      UNREACHABLE();
+    }
+    return it->second;
+  }
+
+  const dex::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
+    const dex::TypeId* type_id = dex_file.FindTypeId(desc);
     CHECK(type_id != nullptr) << "Could not find class " << desc;
-    const DexFile::ClassDef* found = dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
+    const dex::ClassDef* found = dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
     CHECK(found != nullptr) << "Could not find class " << desc;
     return *found;
   }
 
-  HiddenApiAccessFlags::ApiList GetFieldHiddenFlags(const char* name,
-                                                    uint32_t expected_visibility,
-                                                    const DexFile::ClassDef& class_def,
-                                                    const DexFile& dex_file) {
-    ClassAccessor accessor(dex_file, class_def);
+  hiddenapi::ApiList GetFieldHiddenFlags(const char* name,
+                                         uint32_t expected_visibility,
+                                         const dex::ClassDef& class_def,
+                                         const DexFile& dex_file) {
+    ClassAccessor accessor(dex_file, class_def, /* parse hiddenapi flags */ true);
     CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
 
+    if (!accessor.HasHiddenapiClassData()) {
+      return hiddenapi::ApiList::Whitelist();
+    }
+
     for (const ClassAccessor::Field& field : accessor.GetFields()) {
-      const DexFile::FieldId& fid = dex_file.GetFieldId(field.GetIndex());
+      const dex::FieldId& fid = dex_file.GetFieldId(field.GetIndex());
       if (strcmp(name, dex_file.GetFieldName(fid)) == 0) {
         const uint32_t actual_visibility = field.GetAccessFlags() & kAccVisibilityFlags;
         CHECK_EQ(actual_visibility, expected_visibility)
             << "Field " << name << " in class " << accessor.GetDescriptor();
-        return field.DecodeHiddenAccessFlags();
+        return hiddenapi::ApiList(field.GetHiddenapiFlags());
       }
     }
 
@@ -144,23 +198,27 @@
     UNREACHABLE();
   }
 
-  HiddenApiAccessFlags::ApiList GetMethodHiddenFlags(const char* name,
-                                                     uint32_t expected_visibility,
-                                                     bool expected_native,
-                                                     const DexFile::ClassDef& class_def,
-                                                     const DexFile& dex_file) {
-    ClassAccessor accessor(dex_file, class_def);
+  hiddenapi::ApiList GetMethodHiddenFlags(const char* name,
+                                          uint32_t expected_visibility,
+                                          bool expected_native,
+                                          const dex::ClassDef& class_def,
+                                          const DexFile& dex_file) {
+    ClassAccessor accessor(dex_file, class_def, /* parse hiddenapi flags */ true);
     CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
 
+    if (!accessor.HasHiddenapiClassData()) {
+      return hiddenapi::ApiList::Whitelist();
+    }
+
     for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-      const DexFile::MethodId& mid = dex_file.GetMethodId(method.GetIndex());
+      const dex::MethodId& mid = dex_file.GetMethodId(method.GetIndex());
       if (strcmp(name, dex_file.GetMethodName(mid)) == 0) {
         CHECK_EQ(expected_native, method.MemberIsNative())
             << "Method " << name << " in class " << accessor.GetDescriptor();
         const uint32_t actual_visibility = method.GetAccessFlags() & kAccVisibilityFlags;
         CHECK_EQ(actual_visibility, expected_visibility)
             << "Method " << name << " in class " << accessor.GetDescriptor();
-        return method.DecodeHiddenAccessFlags();
+        return hiddenapi::ApiList(method.GetHiddenapiFlags());
       }
     }
 
@@ -169,435 +227,495 @@
     UNREACHABLE();
   }
 
-  HiddenApiAccessFlags::ApiList GetIFieldHiddenFlags(const DexFile& dex_file) {
+  hiddenapi::ApiList GetIFieldHiddenFlags(const DexFile& dex_file) {
     return GetFieldHiddenFlags("ifield", kAccPublic, FindClass("LMain;", dex_file), dex_file);
   }
 
-  HiddenApiAccessFlags::ApiList GetSFieldHiddenFlags(const DexFile& dex_file) {
+  hiddenapi::ApiList GetSFieldHiddenFlags(const DexFile& dex_file) {
     return GetFieldHiddenFlags("sfield", kAccPrivate, FindClass("LMain;", dex_file), dex_file);
   }
 
-  HiddenApiAccessFlags::ApiList GetIMethodHiddenFlags(const DexFile& dex_file) {
+  hiddenapi::ApiList GetIMethodHiddenFlags(const DexFile& dex_file) {
     return GetMethodHiddenFlags(
-        "imethod", 0, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+        "imethod", 0, /* expected_native= */ false, FindClass("LMain;", dex_file), dex_file);
   }
 
-  HiddenApiAccessFlags::ApiList GetSMethodHiddenFlags(const DexFile& dex_file) {
-    return GetMethodHiddenFlags(
-        "smethod", kAccPublic, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+  hiddenapi::ApiList GetSMethodHiddenFlags(const DexFile& dex_file) {
+    return GetMethodHiddenFlags("smethod",
+                                kAccPublic,
+                                /* expected_native= */ false,
+                                FindClass("LMain;", dex_file),
+                                dex_file);
   }
 
-  HiddenApiAccessFlags::ApiList GetINMethodHiddenFlags(const DexFile& dex_file) {
-    return GetMethodHiddenFlags(
-        "inmethod", kAccPublic, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+  hiddenapi::ApiList GetINMethodHiddenFlags(const DexFile& dex_file) {
+    return GetMethodHiddenFlags("inmethod",
+                                kAccPublic,
+                                /* expected_native= */ true,
+                                FindClass("LMain;", dex_file),
+                                dex_file);
   }
 
-  HiddenApiAccessFlags::ApiList GetSNMethodHiddenFlags(const DexFile& dex_file) {
-    return GetMethodHiddenFlags(
-        "snmethod", kAccProtected, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+  hiddenapi::ApiList GetSNMethodHiddenFlags(const DexFile& dex_file) {
+    return GetMethodHiddenFlags("snmethod",
+                                kAccProtected,
+                                /* expected_native= */ true,
+                                FindClass("LMain;", dex_file),
+                                dex_file);
   }
 };
 
 TEST_F(HiddenApiTest, InstanceFieldNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetIFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:I,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetIFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:I,greylist-max-o" << std::endl
+      << "LMain;->ifield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetIFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:I,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetIFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:I,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:I,blacklist,greylist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
-  OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:I,greylist,greylist-max-o" << std::endl
+      << "LMain;->ifield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticFieldNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:LBadType1;,greylist" << std::endl
+      << "LMain;->sfield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->sfield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:Ljava/lang/Object;,greylist" << std::endl
+      << "LMain;->sfield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->sfield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:LBadType1;,greylist" << std::endl
+      << "LMain;->sfield:Ljava/lang/Object;,greylist-max-o" << std::endl
+      << "LMain;->sfield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:LBadType1;,greylist" << std::endl
+      << "LMain;->sfield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->sfield:Ljava/lang/Object;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSFieldHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticFieldTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:LBadType1;,greylist" << std::endl
+      << "LMain;->sfield:Ljava/lang/Object;,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticFieldTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->sfield:Ljava/lang/Object;,blacklist,greylist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticFieldTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
-  OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->sfield:Ljava/lang/Object;,greylist,greylist-max-o" << std::endl
+      << "LMain;->sfield:LBadType3;,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->imethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->imethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetIMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(J)V,greylist" << std::endl
+      << "LMain;->imethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->imethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetIMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->imethod(J)V,greylist-max-o" << std::endl
+      << "LMain;->imethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetIMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->imethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->imethod(J)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetIMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->imethod(J)V,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->imethod(J)V,blacklist,greylist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->imethod(J)V,greylist,greylist-max-o" << std::endl
+      << "LMain;->imethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticMethodNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->smethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->smethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(Ljava/lang/Object;)V,greylist" << std::endl
+      << "LMain;->smethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->smethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->smethod(Ljava/lang/Object;)V,greylist-max-o" << std::endl
+      << "LMain;->smethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->smethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->smethod(Ljava/lang/Object;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticMethodTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->smethod(Ljava/lang/Object;)V,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticMethodTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->smethod(Ljava/lang/Object;)V,blacklist,greylist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticMethodTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->smethod(Ljava/lang/Object;)V,greylist,greylist-max-o" << std::endl
+      << "LMain;->smethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->inmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->inmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetINMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetINMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(C)V,greylist" << std::endl
+      << "LMain;->inmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->inmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetINMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetINMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->inmethod(C)V,greylist-max-o" << std::endl
+      << "LMain;->inmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetINMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->inmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->inmethod(C)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetINMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->inmethod(C)V,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(C)V,blacklist,greylist" << std::endl
+      << "LMain;->inmethod(LBadType2;)V,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->inmethod(C)V,greylist,greylist-max-o" << std::endl
+      << "LMain;->inmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->snmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->snmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSNMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSNMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,greylist" << std::endl
+      << "LMain;->snmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->snmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSNMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSNMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,greylist-max-o" << std::endl
+      << "LMain;->snmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSNMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->snmethod(LBadType2;)V,greylist-max-o" << std::endl
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_NE(dex_file.get(), nullptr);
-  ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+  ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSNMethodHiddenFlags(*dex_file));
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch1) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(LBadType1;)V,greylist" << std::endl
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,blacklist,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch2) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,blacklist,greylist" << std::endl
+      << "LMain;->snmethod(LBadType2;)V,greylist-max-o" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
 TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch3) {
-  ScratchFile dex, light_greylist, dark_greylist, blacklist;
-  OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
-  OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
-  auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->snmethod(Ljava/lang/Integer;)V,greylist,greylist-max-o" << std::endl
+      << "LMain;->snmethod(LBadType3;)V,blacklist" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
+// The following tests use this class hierarchy:
+//
+//    AbstractPackageClass  PublicInterface
+//           |                     |
+//           |    ┌----------------┘
+//           |    |
+//        PackageClass
+//
+// Only PublicInterface is in stubs.
+
+// Test a method declared in PublicInterface and defined in PackageClass.
+TEST_F(HiddenApiTest, InterfaceMethodImplemented) {
+  ScratchFile flags_csv;
+  ASSERT_TRUE(RunHiddenapiList(flags_csv));
+  auto flags = ReadFlagsCsvFile(flags_csv);
+  ASSERT_EQ(SafeMapGet("LPackageClass;->publicMethod1()V", flags), "whitelist");
+}
+
+// Test a method declared in PublicInterface, defined in AbstractPackageClass and
+// inherited by PackageClass.
+TEST_F(HiddenApiTest, InterfaceMethodImplementedInParent) {
+  ScratchFile flags_csv;
+  ASSERT_TRUE(RunHiddenapiList(flags_csv));
+  auto flags = ReadFlagsCsvFile(flags_csv);
+  ASSERT_EQ(SafeMapGet("LAbstractPackageClass;->publicMethod2()V", flags), "whitelist");
+}
+
 }  // namespace art
diff --git a/tools/jfuzz/jfuzz.cc b/tools/jfuzz/jfuzz.cc
index a97a99c..b8a646d 100644
--- a/tools/jfuzz/jfuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -562,11 +562,11 @@
       case 1:
         if (emitArrayVariable(tp))
           return;
-        // FALL-THROUGH
+        [[fallthrough]];
       case 2:
         if (emitLocalVariable(tp))
           return;
-        // FALL-THROUGH
+        [[fallthrough]];
       default:
         emitFieldVariable(tp);
         break;
diff --git a/tools/jit-load/Android.bp b/tools/jit-load/Android.bp
new file mode 100644
index 0000000..a57a408
--- /dev/null
+++ b/tools/jit-load/Android.bp
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+    name: "jitload-defaults",
+    host_supported: true,
+    srcs: [
+        "jitload.cc",
+    ],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    target: {
+        android: {
+        },
+        host: {
+        },
+    },
+    header_libs: [
+        "libopenjdkjvmti_headers",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+art_cc_library {
+    name: "libjitload",
+    defaults: ["jitload-defaults"],
+    shared_libs: [
+        "libart",
+        "libdexfile",
+        "libprofile",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libjitloadd",
+    defaults: [
+        "art_debug_defaults",
+        "jitload-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libdexfiled",
+        "libprofiled",
+        "libartbased",
+    ],
+}
+
+//art_cc_test {
+//    name: "art_jitload_tests",
+//    defaults: [
+//        "art_gtest_defaults",
+//    ],
+//    srcs: ["jitload_test.cc"],
+//}
diff --git a/tools/jit-load/README.md b/tools/jit-load/README.md
new file mode 100644
index 0000000..8aa4513
--- /dev/null
+++ b/tools/jit-load/README.md
@@ -0,0 +1,35 @@
+# jitload
+
+Jitload is an art-specific agent allowing one to count the number of classes
+loaded on the jit-thread or verify that none were.
+
+# Usage
+### Build
+>    `make libjitload`  # or 'make libjitloadd' with debugging checks enabled
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples below assume you want to use the 64-bit version.
+### Command Line
+
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitload.so -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the jitload agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti and agent.
+* Pass the '=fatal' option to the agent to cause it to abort if any classes are
+  loaded on a jit thread. Otherwise a warning will be printed.
+
+>    `art -d -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmtid.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitloadd.so=fatal -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* To use with run-test or testrunner.py use the --with-agent argument.
+
+>    `./test/run-test --host --with-agent libjitloadd.so=fatal 001-HelloWorld`
+
+
+### Printing the Results
+The class-load counts gathered by the agent are printed automatically when the
+program exits normally. In the case of Android applications, they are always
+killed, so we need to trigger the dump manually.
+
+>    `kill -SIGQUIT $(pidof com.example.android.displayingbitmaps)`
+
+Will initiate a dump of the counts (to logcat).
+
diff --git a/tools/jit-load/jitload.cc b/tools/jit-load/jitload.cc
new file mode 100644
index 0000000..7e715de
--- /dev/null
+++ b/tools/jit-load/jitload.cc
@@ -0,0 +1,144 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <jni.h>
+#include <jvmti.h>
+
+#include "base/runtime_debug.h"
+#include "jit/jit.h"
+#include "runtime-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace jitload {
+
+// Special env version that allows JVMTI-like access on userdebug builds.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+#define CHECK_CALL_SUCCESS(c) \
+  do { \
+    auto vc = (c); \
+    CHECK(vc == JNI_OK || vc == JVMTI_ERROR_NONE) << "call " << #c  << " did not succeed\n"; \
+  } while (false)
+
+static jthread GetJitThread() {
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  auto* jit = art::Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    return nullptr;
+  }
+  auto* thread_pool = jit->GetThreadPool();
+  if (thread_pool == nullptr) {
+    return nullptr;
+  }
+  // Currently we only have a single jit thread so we only look at that one.
+  return soa.AddLocalReference<jthread>(
+          thread_pool->GetWorkers()[0]->GetThread()->GetPeerFromOtherThread());
+}
+
+JNICALL void VmInitCb(jvmtiEnv* jvmti,
+                      JNIEnv* env ATTRIBUTE_UNUSED,
+                      jthread curthread ATTRIBUTE_UNUSED) {
+  jthread jit_thread = GetJitThread();
+  if (jit_thread != nullptr) {
+    CHECK_EQ(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_PREPARE, jit_thread),
+             JVMTI_ERROR_NONE);
+  }
+}
+
+struct AgentOptions {
+  bool fatal;
+  uint64_t cnt;
+};
+
+JNICALL static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+  LOG(WARNING) << "Jit thread has loaded " << ops->cnt << " classes";
+}
+
+JNICALL void ClassPrepareJit(jvmtiEnv* jvmti,
+                             JNIEnv* jni_env ATTRIBUTE_UNUSED,
+                             jthread thr ATTRIBUTE_UNUSED,
+                             jclass klass) {
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+  char* klass_name;
+  CHECK_CALL_SUCCESS(jvmti->GetClassSignature(klass, &klass_name, nullptr));
+  (ops->fatal ? LOG_STREAM(FATAL)
+              : LOG_STREAM(WARNING)) << "Loaded " << klass_name << " on jit thread!";
+  ops->cnt++;
+  CHECK_CALL_SUCCESS(jvmti->Deallocate(reinterpret_cast<unsigned char*>(klass_name)));
+}
+
+JNICALL void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+  DataDumpRequestCb(jvmti);
+}
+
+static jvmtiEnv* SetupJvmti(JavaVM* vm, const char* options) {
+  android::base::InitLogging(/* argv= */nullptr);
+
+  jvmtiEnv* jvmti = nullptr;
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_0) != JNI_OK &&
+      vm->GetEnv(reinterpret_cast<void**>(&jvmti), kArtTiVersion) != JNI_OK) {
+    LOG(FATAL) << "Unable to setup JVMTI environment!";
+  }
+  jvmtiEventCallbacks cb {
+        .VMInit = VmInitCb,
+        .ClassPrepare = ClassPrepareJit,
+        .DataDumpRequest = DataDumpRequestCb,
+        .VMDeath = VMDeathCb,
+  };
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(
+      jvmti->Allocate(sizeof(AgentOptions), reinterpret_cast<unsigned char**>(&ops)));
+  ops->fatal = (strcmp(options, "fatal") == 0);
+  ops->cnt = 0;
+  CHECK_CALL_SUCCESS(jvmti->SetEnvironmentLocalStorage(ops));
+  CHECK_CALL_SUCCESS(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+  CHECK_CALL_SUCCESS(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+  CHECK_CALL_SUCCESS(
+      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr));
+  return jvmti;
+}
+
+// Early attachment (e.g. 'java -agent[lib|path]:filename.so').
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* /* reserved */) {
+  SetupJvmti(vm, options);
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* /* reserved */) {
+  jvmtiEnv* jvmti = SetupJvmti(vm, options);
+
+  JNIEnv* jni = nullptr;
+  jthread thr = nullptr;
+  CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6));
+  CHECK_CALL_SUCCESS(jvmti->GetCurrentThread(&thr));
+
+  // Final setup is done in the VmInitCb.
+  VmInitCb(jvmti, jni, thr);
+
+  jni->DeleteLocalRef(thr);
+  return JNI_OK;
+}
+
+#undef CHECK_CALL_SUCCESS
+
+}  // namespace jitload
+
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 3ef78d5..5177919 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -17,13 +17,13 @@
 {
   description: "Differences between vogar and cts in user directory",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   name: "libcore.java.lang.SystemTest#testSystemProperties_mutable"
 },
 {
   description: "Differences between vogar and cts. Passes with --mode activity",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.java.lang.OldSystemTest#test_getProperties"]
 },
 {
@@ -32,7 +32,7 @@
                 (--invoke-with \"su root\"). Does not pass after setting chmod
                 777 all directories on path to socket (on device without su).",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.libcore.io.OsTest#testUnixDomainSockets_in_file_system"]
 },
 {
@@ -47,7 +47,7 @@
 {
   description: "Issue with incorrect device time (1970)",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.java.util.TimeZoneTest#testDisplayNames",
           "libcore.java.util.TimeZoneTest#test_useDaylightTime_Taiwan",
           "org.apache.harmony.tests.java.util.TimeZoneTest#test_hasSameRules_Ljava_util_TimeZone"],
@@ -57,13 +57,13 @@
   description: "Issue with incorrect device time (1970). Test assumes that DateTime.now()
                 is greater then a date in 1998.",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["org.apache.harmony.tests.java.util.DateTest#test_Constructor"]
 },
 {
   description: "Failing due to a locale problem on hammerhead.",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.icu.DateIntervalFormatTest#test10089890",
           "libcore.icu.DateIntervalFormatTest#test10209343_when_not_this_year",
           "libcore.icu.DateIntervalFormatTest#test10560853_for_single_day_events",
@@ -80,7 +80,7 @@
 {
   description: "Failing due to missing localhost on hammerhead and volantis.",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.javax.crypto.CipherTest#testCipherInitWithCertificate",
           "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
           "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
@@ -95,13 +95,13 @@
 {
   description: "Test timeouts",
   result: EXEC_TIMEOUT,
-  modes: [device],
+  modes: [device_testdex],
   names: ["org.apache.harmony.tests.java.util.ScannerTest#testPerformance"]
 },
 {
   description: "Needs the newest cat version on the device",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getErrorStream"]
 },
 {
@@ -117,7 +117,7 @@
 },
 {
   description: "Linker issues in chrooted environment",
-  modes: [device],
+  modes: [device_testdex],
   result: EXEC_FAILED,
   names: ["org.apache.harmony.tests.java.lang.ProcessManagerTest#testEnvironment"]
 },
@@ -130,14 +130,14 @@
 {
   description: "test_xattr fails on arm64 on the buildbots only: needs investigation",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.libcore.io.OsTest#test_xattr"],
   bug: 22258911
 },
 {
   description: "fails on L builds: needs investigation",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
 },
 {
@@ -155,41 +155,6 @@
   bug: 25437292
 },
 {
-  description: "Missing resource in classpath",
-  result: EXEC_FAILED,
-  modes: [device],
-  names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetBoolean",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetByteArray",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetDouble",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetFloat",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetInt",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetLong",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testKeys",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testNodeExists",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPut",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutBoolean",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutByteArray",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutDouble",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutFloat",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutInt",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutLong",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemove",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemoveNode",
-          "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync",
-          "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding",
-          "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles",
-          "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String",
-          "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode",
-          "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
-          "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
-          "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
-          "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
-},
-{
   description: "Only work with --mode=activity",
   result: EXEC_FAILED,
   names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ]
@@ -198,7 +163,7 @@
   description: "Flaky test",
   result: EXEC_FAILED,
   bug: 30107038,
-  modes: [device],
+  modes: [device_testdex],
   names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_destroyForcibly"]
 },
 {
@@ -206,7 +171,7 @@
                 Unclear if this relates to the tests running sh as a child process.",
   result: EXEC_FAILED,
   bug: 30657148,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
           "libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
 },
@@ -216,16 +181,6 @@
   names: ["libcore.javax.crypto.spec.AlgorithmParametersTestGCM#testEncoding"]
 },
 {
-  description: "Tests fail because mockito can not read android.os.Build$VERSION",
-  result: EXEC_FAILED,
-  bug: 111704422,
-  names: ["libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
-          "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
-          "libcore.javax.crypto.CipherInputStreamTest#testCloseTwice",
-          "libcore.libcore.io.BlockGuardOsTest#test_android_getaddrinfo_networkPolicy",
-          "libcore.libcore.io.BlockGuardOsTest#test_checkNewMethodsInPosix"]
-},
-{
   description: "fdsan doesn't exist on the host",
   result: EXEC_FAILED,
   modes: [host],
@@ -233,6 +188,58 @@
   names: ["libcore.libcore.io.FdsanTest#testFileInputStream",
           "libcore.libcore.io.FdsanTest#testFileOutputStream",
           "libcore.libcore.io.FdsanTest#testRandomAccessFile",
-          "libcore.libcore.io.FdsanTest#testParcelFileDescriptor"]
+          "libcore.libcore.io.FdsanTest#testParcelFileDescriptor",
+          "libcore.libcore.io.FdsanTest#testDatagramSocket",
+          "libcore.libcore.io.FdsanTest#testSocket"]
+},
+{
+  description: "Timeout on heap-poisoning target builds",
+  result: EXEC_FAILED,
+  modes: [device_testdex],
+  bug: 116446372,
+  names: ["libcore.libcore.io.FdsanTest#testSocket"]
+},
+{
+  description: "Host implementation of android_getaddrinfo differs from device implementation",
+  result: EXEC_FAILED,
+  modes: [host],
+  bug: 121230364,
+  names: [
+    "libcore.libcore.net.InetAddressUtilsTest#parseNumericAddress[8]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[10]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[11]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[12]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[5]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[6]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[7]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[8]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[9]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[10]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[11]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[12]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[5]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[6]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[7]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[8]",
+    "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[9]"
+  ]
+},
+{
+  description: "Apex related",
+  result: EXEC_FAILED,
+  modes: [device_testdex],
+  bug: 122642227,
+  names: [
+    "libcore.libcore.icu.TimeZoneIntegrationTest#testTimeZoneDataVersion",
+    "libcore.libcore.icu.TimeZoneIntegrationTest#testTimeZoneDebugInfo",
+    "libcore.libcore.icu.TimeZoneIntegrationTest#testTzDataSetVersions"
+  ]
+},
+{
+  description: "Expected networking failure on host / old systems: we expect 97 (EAFNOSUPPORT), but we get 22 (EINVAL)",
+  result: EXEC_FAILED,
+  names: [
+    "libcore.libcore.io.OsTest#testCrossFamilyBindConnectSendto"
+  ]
 }
 ]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 23533af..9009a4e 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -7,7 +7,7 @@
 {
   description: "Timeouts on target with gcstress and debug.",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["jsr166.CompletableFutureTest#testCompleteOnTimeout_completed",
           "jsr166.CompletableFutureTest#testDelayedExecutor",
           "jsr166.ExecutorsTest#testTimedCallable",
@@ -32,6 +32,12 @@
         ]
 },
 {
+  description: "Timeouts on host with gcstress and debug.",
+  result: EXEC_FAILED,
+  modes: [host],
+  names: ["jsr166.StampedLockTest#testWriteAfterReadLock"]
+},
+{
   description: "Sometimes times out with gcstress and debug.",
   result: EXEC_FAILED,
   bug: 78228743,
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index 965e85c..9af2f64 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -7,7 +7,7 @@
 {
   description: "Timeouts on target with gcstress.",
   result: EXEC_FAILED,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.javax.crypto.CipherBasicsTest#testGcmEncryption"]
 },
 {
@@ -26,12 +26,16 @@
 {
   description: "Timeouts.",
   result: EXEC_FAILED,
-  modes: [device],
-  names: ["libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
-          "org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
-          "org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
-          "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
+  modes: [device_testdex],
+  names: ["jsr166.TimeUnitTest#testConvert",
+          "libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
           "libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
-          "libcore.java.text.SimpleDateFormatTest#testLocales"]
+          "libcore.java.text.SimpleDateFormatTest#testLocales",
+          "org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
+          "org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep",
+          "org.apache.harmony.tests.java.lang.String2Test#test_getBytes",
+          "org.apache.harmony.tests.java.text.DateFormatTest#test_getAvailableLocales",
+          "org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
+          "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext"]
 }
 ]
diff --git a/tools/libcore_network_failures.txt b/tools/libcore_network_failures.txt
index e7e31db..380f56b 100644
--- a/tools/libcore_network_failures.txt
+++ b/tools/libcore_network_failures.txt
@@ -8,7 +8,7 @@
   description: "Ignore failure of network-related tests on new devices running Android O",
   result: EXEC_FAILED,
   bug: 74725685,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet",
           "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
           "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg
new file mode 100644
index 0000000..8df8433
--- /dev/null
+++ b/tools/luci/config/cr-buildbucket.cfg
@@ -0,0 +1,128 @@
+# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
+# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
+#
+# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
+# schema of this file and documentation.
+#
+# Please keep this list sorted by bucket name.
+acl_sets {
+  name: "ci"
+  acls {
+    role: READER
+    group: "all"
+  }
+  acls {
+    role: WRITER
+    group: "project-art-admins"
+  }
+  acls {
+    role: SCHEDULER
+    identity: "luci-scheduler@appspot.gserviceaccount.com"
+  }
+}
+
+buckets {
+  name: "luci.art.ci"
+  acl_sets: "ci"
+  swarming {
+    hostname: "chromium-swarm.appspot.com"
+    builder_defaults {
+      dimensions: "pool:luci.art.ci"
+      service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      execution_timeout_secs: 10800  # 3h
+      swarming_tags: "vpython:native-python-wrapper"
+      build_numbers: YES
+      # Some builders require specific hardware, so we make the assignment in bots.cfg
+      auto_builder_dimension: YES
+      luci_migration_host: "luci-migration.appspot.com"
+      recipe {
+        cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
+        cipd_version: "refs/heads/master"
+        name: "art"
+        properties: "mastername:client.art"
+      }
+    }
+
+    builders {
+      name: "angler-armv7-debug"
+    }
+    builders {
+      name: "angler-armv7-generational-cc"
+    }
+    builders {
+      name: "angler-armv7-ndebug"
+    }
+    builders {
+      name: "angler-armv8-debug"
+    }
+    builders {
+      name: "angler-armv8-generational-cc"
+    }
+    builders {
+      name: "angler-armv8-ndebug"
+    }
+    builders {
+      name: "aosp-builder-cc"
+    }
+    builders {
+      name: "aosp-builder-cms"
+    }
+    builders {
+      name: "bullhead-armv7-gcstress-ndebug"
+    }
+    builders {
+      name: "bullhead-armv8-gcstress-debug"
+    }
+    builders {
+      name: "bullhead-armv8-gcstress-ndebug"
+    }
+    builders {
+      name: "fugu-debug"
+    }
+    builders {
+      name: "fugu-ndebug"
+    }
+    builders {
+      name: "host-x86-cms"
+    }
+    builders {
+      name: "host-x86-debug"
+    }
+    builders {
+      name: "host-x86-gcstress-debug"
+    }
+    builders {
+      name: "host-x86-ndebug"
+    }
+    builders {
+      name: "host-x86-poison-debug"
+    }
+    builders {
+      name: "host-x86_64-cdex-fast"
+    }
+    builders {
+      name: "host-x86_64-cms"
+    }
+    builders {
+      name: "host-x86_64-debug"
+    }
+    builders {
+      name: "host-x86_64-generational-cc"
+    }
+    builders {
+      name: "host-x86_64-ndebug"
+    }
+    builders {
+      name: "host-x86_64-poison-debug"
+    }
+    builders {
+      name: "volantis-armv7-poison-debug"
+    }
+    builders {
+      name: "volantis-armv8-poison-debug"
+    }
+    builders {
+      name: "volantis-armv8-poison-ndebug"
+    }
+  }
+}
diff --git a/tools/luci/config/luci-logdog.cfg b/tools/luci/config/luci-logdog.cfg
new file mode 100644
index 0000000..e910bc3
--- /dev/null
+++ b/tools/luci/config/luci-logdog.cfg
@@ -0,0 +1,18 @@
+# For the schema of this file and documentation, see ProjectConfig message in
+# https://luci-config.appspot.com/schemas/services/luci-logdog:logdog.cfg
+# This is for the art project, but we're going to piggyback
+# off of the chromium settings.
+
+# Auth groups who can read log streams.
+# Currently, all projects with "all" (aka public) read/write permissions use
+# the Chromium auth group and buckets.
+reader_auth_groups: "all"
+
+# Auth groups who can register and emit new log streams.
+# These are bots that emit logs.
+writer_auth_groups: "luci-logdog-chromium-writers"
+
+# The base Google Storage archival path for this project.
+#
+# Archived LogDog logs will be written to this bucket/path.
+archive_gs_bucket: "chromium-luci-logdog"
diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg
new file mode 100644
index 0000000..60e8404
--- /dev/null
+++ b/tools/luci/config/luci-milo.cfg
@@ -0,0 +1,146 @@
+logo_url: "https://storage.googleapis.com/chrome-infra-public/logo/art-logo.png"
+
+consoles {
+  id: "main"
+  name: "ART Main Console"
+  repo_url: "https://android.googlesource.com/platform/art"
+  refs: "refs/heads/master"
+  manifest_name: "REVISION"
+  include_experimental_builds: true
+
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv7-debug"
+    category: "angler|armv7"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv7-generational-cc"
+    category: "angler|armv7"
+    short_name: "gen"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv7-ndebug"
+    category: "angler|armv7"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv8-debug"
+    category: "angler|armv8"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv8-generational-cc"
+    category: "angler|armv8"
+    short_name: "gen"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/angler-armv8-ndebug"
+    category: "angler|armv8"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/aosp-builder-cc"
+    category: "aosp"
+    short_name: "cc"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/aosp-builder-cms"
+    category: "aosp"
+    short_name: "cms"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/bullhead-armv7-gcstress-ndebug"
+    category: "bullhead|armv7|gcstress"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-debug"
+    category: "bullhead|armv8|gcstress"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-ndebug"
+    category: "bullhead|armv8|gcstress"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/fugu-debug"
+    category: "fugu"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/fugu-ndebug"
+    category: "fugu"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86-cms"
+    category: "host|x86"
+    short_name: "cms"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86-debug"
+    category: "host|x86"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86-ndebug"
+    category: "host|x86"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86-gcstress-debug"
+    category: "host|x86"
+    short_name: "gcs"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86-poison-debug"
+    category: "host|x86"
+    short_name: "psn"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-cdex-fast"
+    category: "host|x64"
+    short_name: "cdx"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-cms"
+    category: "host|x64"
+    short_name: "cms"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-debug"
+    category: "host|x64"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-generational-cc"
+    category: "host|x64"
+    short_name: "gen"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-ndebug"
+    category: "host|x64"
+    short_name: "ndbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/host-x86_64-poison-debug"
+    category: "host|x64"
+    short_name: "psn"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/volantis-armv7-poison-debug"
+    category: "volantis|armv7|poison"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/volantis-armv8-poison-debug"
+    category: "volantis|armv8|poison"
+    short_name: "dbg"
+  }
+  builders {
+    name: "buildbucket/luci.art.ci/volantis-armv8-poison-ndebug"
+    category: "volantis|armv8|poison"
+    short_name: "ndbg"
+  }
+}
diff --git a/tools/luci/config/luci-scheduler.cfg b/tools/luci/config/luci-scheduler.cfg
new file mode 100644
index 0000000..717daa5
--- /dev/null
+++ b/tools/luci/config/luci-scheduler.cfg
@@ -0,0 +1,325 @@
+# Defines jobs on luci-scheduler.appspot.com.
+#
+# For schema of this file and documentation see ProjectConfig message in
+#
+# https://chromium.googlesource.com/infra/luci/luci-go/+/master/scheduler/appengine/messages/config.proto
+
+acl_sets {
+  name: "default"
+  acls {
+    role: READER
+    granted_to: "group:all"
+  }
+  acls {
+    role: OWNER
+    granted_to: "group:project-art-admins"
+  }
+}
+
+trigger {
+  id: "master-gitiles-trigger"
+  acl_sets: "default"
+  gitiles: {
+    repo: "https://android.googlesource.com/platform/art"
+    refs: "refs/heads/master"
+  }
+
+  triggers: "angler-armv7-debug"
+  triggers: "angler-armv7-generational-cc"
+  triggers: "angler-armv7-ndebug"
+  triggers: "angler-armv8-debug"
+  triggers: "angler-armv8-generational-cc"
+  triggers: "angler-armv8-ndebug"
+  triggers: "aosp-builder-cc"
+  triggers: "aosp-builder-cms"
+  triggers: "bullhead-armv7-gcstress-ndebug"
+  triggers: "bullhead-armv8-gcstress-debug"
+  triggers: "bullhead-armv8-gcstress-ndebug"
+  triggers: "fugu-debug"
+  triggers: "fugu-ndebug"
+  triggers: "host-x86-cms"
+  triggers: "host-x86-debug"
+  triggers: "host-x86-gcstress-debug"
+  triggers: "host-x86-ndebug"
+  triggers: "host-x86-poison-debug"
+  triggers: "host-x86_64-cdex-fast"
+  triggers: "host-x86_64-cms"
+  triggers: "host-x86_64-debug"
+  triggers: "host-x86_64-generational-cc"
+  triggers: "host-x86_64-ndebug"
+  triggers: "host-x86_64-poison-debug"
+  triggers: "volantis-armv7-poison-debug"
+  triggers: "volantis-armv8-poison-debug"
+  triggers: "volantis-armv8-poison-ndebug"
+}
+
+job {
+  id: "angler-armv7-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv7-debug"
+  }
+}
+
+job {
+  id: "angler-armv7-generational-cc"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv7-generational-cc"
+  }
+}
+
+job {
+  id: "angler-armv7-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv7-ndebug"
+  }
+}
+
+job {
+  id: "angler-armv8-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv8-debug"
+  }
+}
+
+job {
+  id: "angler-armv8-generational-cc"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv8-generational-cc"
+  }
+}
+
+job {
+  id: "angler-armv8-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "angler-armv8-ndebug"
+  }
+}
+
+job {
+  id: "aosp-builder-cc"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "aosp-builder-cc"
+  }
+}
+
+job {
+  id: "aosp-builder-cms"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "aosp-builder-cms"
+  }
+}
+
+job {
+  id: "bullhead-armv7-gcstress-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "bullhead-armv7-gcstress-ndebug"
+  }
+}
+
+job {
+  id: "bullhead-armv8-gcstress-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "bullhead-armv8-gcstress-debug"
+  }
+}
+
+job {
+  id: "bullhead-armv8-gcstress-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "bullhead-armv8-gcstress-ndebug"
+  }
+}
+
+job {
+  id: "fugu-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "fugu-debug"
+  }
+}
+
+job {
+  id: "fugu-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "fugu-ndebug"
+  }
+}
+
+job {
+  id: "host-x86-cms"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86-cms"
+  }
+}
+
+job {
+  id: "host-x86-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86-debug"
+  }
+}
+
+job {
+  id: "host-x86-gcstress-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86-gcstress-debug"
+  }
+}
+
+job {
+  id: "host-x86-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86-ndebug"
+  }
+}
+
+job {
+  id: "host-x86-poison-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86-poison-debug"
+  }
+}
+
+job {
+  id: "host-x86_64-cdex-fast"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-cdex-fast"
+  }
+}
+
+job {
+  id: "host-x86_64-cms"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-cms"
+  }
+}
+
+job {
+  id: "host-x86_64-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-debug"
+  }
+}
+
+job {
+  id: "host-x86_64-generational-cc"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-generational-cc"
+  }
+}
+
+job {
+  id: "host-x86_64-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-ndebug"
+  }
+}
+
+job {
+  id: "host-x86_64-poison-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "host-x86_64-poison-debug"
+  }
+}
+
+job {
+  id: "volantis-armv7-poison-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "volantis-armv7-poison-debug"
+  }
+}
+
+job {
+  id: "volantis-armv8-poison-debug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "volantis-armv8-poison-debug"
+  }
+}
+
+job {
+  id: "volantis-armv8-poison-ndebug"
+  acl_sets: "default"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "luci.art.ci"
+    builder: "volantis-armv8-poison-ndebug"
+  }
+}
+
diff --git a/tools/luci/config/project.cfg b/tools/luci/config/project.cfg
new file mode 100644
index 0000000..41d172d
--- /dev/null
+++ b/tools/luci/config/project.cfg
@@ -0,0 +1,4 @@
+# For the schema of this file and documentation, see ProjectCfg message in
+# https://luci-config.appspot.com/schemas/projects:project.cfg
+name: "art"
+access: "group:all"  # public
diff --git a/tools/mount-buildbot-apexes.sh b/tools/mount-buildbot-apexes.sh
new file mode 100755
index 0000000..778d634
--- /dev/null
+++ b/tools/mount-buildbot-apexes.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Mount Android Runtime and Core Libraries APEX packages required in the chroot directory.
+# This script emulates some of the actions performed by `apexd`.
+
+green='\033[0;32m'
+nc='\033[0m'
+
+# Setup as root, as some actions performed here require it.
+adb root
+adb wait-for-device
+
+# Exit early if there is no chroot.
+[[ -n "$ART_TEST_CHROOT" ]] || exit
+
+# Check that ART_TEST_CHROOT is correctly defined.
+[[ "$ART_TEST_CHROOT" = /* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
+
+# Check that the "$ART_TEST_CHROOT/apex" directory exists.
+adb shell test -d "$ART_TEST_CHROOT/apex" \
+  || { echo "$ART_TEST_CHROOT/apex does not exist or is not a directory"; exit 1; }
+
+# Create a directory where we extract APEX packages' payloads (ext4 images)
+# under the chroot directory.
+apex_image_dir="/tmp/apex"
+adb shell mkdir -p "$ART_TEST_CHROOT$apex_image_dir"
+
+# activate_system_package APEX_PACKAGE APEX_NAME
+# ----------------------------------------------
+# Extract payload (ext4 image) from system APEX_PACKAGE and mount it as
+# APEX_NAME in `/apex` under the chroot directory.
+activate_system_package() {
+  local apex_package=$1
+  local apex_name=$2
+  local apex_package_path="/system/apex/$apex_package"
+  local abs_mount_point="$ART_TEST_CHROOT/apex/$apex_name"
+  local abs_image_filename="$ART_TEST_CHROOT$apex_image_dir/$apex_name.img"
+
+  # Make sure that the (absolute) path to the mounted ext4 image is less than
+  # 64 characters, which is a hard limit set in the kernel for loop device
+  # filenames (otherwise, we would get an error message from `losetup`, used
+  # by `mount` to manage the loop device).
+  [[ "${#abs_image_filename}" -ge 64 ]] \
+    && { echo "Filename $abs_image_filename is too long to be used with a loop device"; exit 1; }
+
+  echo -e "${green}Activating package $apex_package as $apex_name${nc}"
+
+  # Extract payload (ext4 image). As standard Android builds do not contain
+  # `unzip`, we use the one we built and sync'd to the chroot directory instead.
+  local payload_filename="apex_payload.img"
+  adb shell chroot "$ART_TEST_CHROOT" \
+    /system/bin/unzip -q "$apex_package_path" "$payload_filename" -d "$apex_image_dir"
+  # Rename the extracted payload to have its name match the APEX's name.
+  adb shell mv "$ART_TEST_CHROOT$apex_image_dir/$payload_filename" "$abs_image_filename"
+  # Check that the mount point is available.
+  adb shell mount | grep -q " on $abs_mount_point" && \
+    { echo "$abs_mount_point is already used as mount point"; exit 1; }
+  # Mount the ext4 image.
+  adb shell mkdir -p "$abs_mount_point"
+  adb shell mount -o loop,ro "$abs_image_filename" "$abs_mount_point"
+}
+
+# Activate the Android Runtime APEX.
+# Note: We use the Debug Runtime APEX (which is a superset of the Release Runtime APEX).
+activate_system_package com.android.runtime.debug.apex com.android.runtime
diff --git a/tools/prebuilt_libjdwp_art_failures.txt b/tools/prebuilt_libjdwp_art_failures.txt
index 2664560..ee59315 100644
--- a/tools/prebuilt_libjdwp_art_failures.txt
+++ b/tools/prebuilt_libjdwp_art_failures.txt
@@ -10,101 +10,101 @@
   description: "Test fails due to unexpectedly getting the thread-groups of zombie threads",
   result: EXEC_FAILED,
   bug: 66906414,
-  name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
+  name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference_ThreadGroup002Test#testThreadGroup002"
 },
 {
   description: "Test fails due to modifiers not including ACC_SUPER",
   result: EXEC_FAILED,
   bug: 66906055,
-  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.ModifiersTest#testModifiers001"
+  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_ModifiersTest#testModifiers001"
 },
 {
   description: "Test fails due to static values not being set correctly.",
   result: EXEC_FAILED,
   bug: 66905894,
-  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
+  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues006Test#testGetValues006"
 },
 {
   description: "Tests fail with assertion error on slot number",
   result: EXEC_FAILED,
   bug: 66905468,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableTest#testVariableTableTest001",
-           "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableWithGenericTest#testVariableTableWithGenericTest001" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.Method_VariableTableTest#testVariableTableTest001",
+           "org.apache.harmony.jpda.tests.jdwp.Method_VariableTableWithGenericTest#testVariableTableWithGenericTest001" ]
 },
 {
   description: "Test fails with assertion error 'Invalid Path' for class path.",
   result: EXEC_FAILED,
   bug: 66904994,
-  name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ClassPathsTest#testClassPaths001"
+  name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_ClassPathsTest#testClassPaths001"
 },
 {
   description: "Test fails with Error VM_DEAD when trying to resume during VM_DEATH event",
   result: EXEC_FAILED,
   bug: 66904725,
-  name: "org.apache.harmony.jpda.tests.jdwp.Events.VMDeath002Test#testVMDeathRequest"
+  name: "org.apache.harmony.jpda.tests.jdwp.Events_VMDeath002Test#testVMDeathRequest"
 },
 {
   description: "Test fails with OPAQUE_FRAME error due to attempting a GetLocalReference on a proxy frame instead of GetLocalInstance!",
   result: EXEC_FAILED,
   bug: 66903662,
-  name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ProxyThisObjectTest#testThisObject"
+  name: "org.apache.harmony.jpda.tests.jdwp.StackFrame_ProxyThisObjectTest#testThisObject"
 },
 {
   description: "Test fails with unexpected TYPE_MISMATCH error",
   result: EXEC_FAILED,
   bug: 66904008,
-  name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ThisObjectTest#testThisObjectTest001"
+  name: "org.apache.harmony.jpda.tests.jdwp.StackFrame_ThisObjectTest#testThisObjectTest001"
 },
 {
   description: "Tests that fail only on ART with INVALID_SLOT error",
   result: EXEC_FAILED,
   bug: 66903181,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testBreakpoint",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testException",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testFieldAccess",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testFieldModification",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodEntry",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
-           "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testBreakpoint",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testException",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testFieldAccess",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testFieldModification",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodEntry",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodExit",
+           "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
 },
 {
   description: "Tests for VMDebug functionality not implemented in the upstream libjdwp",
   result: EXEC_FAILED,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug",
-           "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest002#testVMDebug" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest#testVMDebug",
+           "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest002#testVMDebug" ]
 },
 /* TODO Categorize these failures more. */
 {
   description: "Tests that fail on both ART and RI. These tests are likely incorrect",
   result: EXEC_FAILED,
   bug: 66906734,
-  names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference.SetValues003Test#testSetValues003_InvalidIndex",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod002",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod003",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.NewInstanceTest#testNewInstance002",
-           "org.apache.harmony.jpda.tests.jdwp.ClassType.SetValues002Test#testSetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.Events.ClassPrepare002Test#testClassPrepareCausedByDebugger",
-           "org.apache.harmony.jpda.tests.jdwp.Events.ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.DisableCollectionTest#testDisableCollection_null",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_invalid",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_null",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.GetValues002Test#testGetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValues003Test#testSetValues003",
-           "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValuesTest#testSetValues001",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.FieldsWithGenericTest#testFieldsWithGeneric001",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues002Test#testGetValues002",
-           "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues004Test#testGetValues004",
-           "org.apache.harmony.jpda.tests.jdwp.StringReference.ValueTest#testStringReferenceValueTest001_NullString",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ChildrenTest#testChildren_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
-           "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+  names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference_SetValues003Test#testSetValues003_InvalidIndex",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod002",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod003",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_NewInstanceTest#testNewInstance002",
+           "org.apache.harmony.jpda.tests.jdwp.ClassType_SetValues002Test#testSetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.Events_ClassPrepare002Test#testClassPrepareCausedByDebugger",
+           "org.apache.harmony.jpda.tests.jdwp.Events_ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_DisableCollectionTest#testDisableCollection_null",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_invalid",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_null",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_GetValues002Test#testGetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValues003Test#testSetValues003",
+           "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValuesTest#testSetValues001",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_FieldsWithGenericTest#testFieldsWithGeneric001",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues002Test#testGetValues002",
+           "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues004Test#testGetValues004",
+           "org.apache.harmony.jpda.tests.jdwp.StringReference_ValueTest#testStringReferenceValueTest001_NullString",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ChildrenTest#testChildren_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_NameTest#testName001_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ParentTest#testParent_NullObject",
+           "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_CapabilitiesNewTest#testCapabilitiesNew001" ]
 },
 {
   description: "Test for ddms extensions that are not implemented for prebuilt-libjdwp",
   result: EXEC_FAILED,
   bug: 69169846,
-  name: "org.apache.harmony.jpda.tests.jdwp.DDM.DDMTest#testChunk001"
+  name: "org.apache.harmony.jpda.tests.jdwp.DDM_DDMTest#testChunk001"
 },
 ]
diff --git a/tools/run-gtests.sh b/tools/run-gtests.sh
new file mode 100755
index 0000000..8585589
--- /dev/null
+++ b/tools/run-gtests.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script to run all gtests located under $ART_TEST_CHROOT/data/nativetest{64}
+
+ADB="${ADB:-adb}"
+all_tests=()
+failing_tests=()
+
+function add_tests {
+  all_tests+=$(${ADB} shell "test -d $ART_TEST_CHROOT/$1 && chroot $ART_TEST_CHROOT find $1 -name \*_test")
+}
+
+function fail {
+  failing_tests+=($1)
+}
+
+add_tests "/data/nativetest"
+add_tests "/data/nativetest64"
+
+for i in $all_tests; do
+  ${ADB} shell "chroot $ART_TEST_CHROOT env LD_LIBRARY_PATH= ANDROID_ROOT='/system' ANDROID_RUNTIME_ROOT=/system $i" || fail $i
+done
+
+if [ -n "$failing_tests" ]; then
+  for i in "${failing_tests[@]}"; do
+    echo "Failed test: $i"
+  done
+  exit 1
+fi
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index b0b5810..fbd8077 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -43,6 +43,23 @@
 java_lib_location="${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES"
 make_target_name="apache-harmony-jdwp-tests-hostdex"
 
+function boot_classpath_arg {
+  local dir="$1"
+  local suffix="$2"
+  shift 2
+  local separator=""
+  for var
+  do
+    printf -- "${separator}${dir}/${var}${suffix}.jar";
+    separator=":"
+  done
+}
+
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+
 vm_args=""
 art="$android_root/bin/art"
 art_debugee="sh $android_root/bin/art"
@@ -59,6 +76,8 @@
 explicit_debug="no"
 verbose="no"
 image="-Ximage:/data/art-test/core.art"
+boot_classpath="$(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
+boot_classpath_locations=""
 with_jdwp_path=""
 agent_wrapper=""
 vm_args=""
@@ -90,11 +109,27 @@
     art_debugee="bash ${OUT_DIR-out}/host/linux-x86/bin/art"
     # We force generation of a new image to avoid build-time and run-time classpath differences.
     image="-Ximage:/system/non/existent/vogar.art"
+    # Pass the host boot classpath.
+    if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
+      framework_location="${ANDROID_HOST_OUT:${#ANDROID_BUILD_TOP}+1}/framework"
+    else
+      echo "error: ANDROID_BUILD_TOP/ is not a prefix of ANDROID_HOST_OUT"
+      echo "ANDROID_BUILD_TOP=${ANDROID_BUILD_TOP}"
+      echo "ANDROID_HOST_OUT=${ANDROID_HOST_OUT}"
+      exit
+    fi
+    boot_classpath="$(boot_classpath_arg ${ANDROID_HOST_OUT}/framework -hostdex $BOOT_CLASSPATH_JARS)"
+    boot_classpath_locations="$(boot_classpath_arg ${framework_location} -hostdex $BOOT_CLASSPATH_JARS)"
     # We do not need a device directory on host.
     device_dir=""
     # Vogar knows which VM to use on host.
     vm_command=""
     shift
+  elif [[ "$1" == "--mode=device" ]]; then
+    # Remove the --mode=device from the arguments and replace it with --mode=device_testdex
+    args=${args/$1}
+    args="$args --mode=device_testdex"
+    shift
   elif [[ "$1" == "--mode=jvm" ]]; then
     mode="ri"
     make_target_name="apache-harmony-jdwp-tests-host"
@@ -104,6 +139,8 @@
     debuggee_args=""
     # No image. On the RI.
     image=""
+    boot_classpath=""
+    boot_classpath_locations=""
     # We do not need a device directory on RI.
     device_dir=""
     # Vogar knows which VM to use on RI.
@@ -260,8 +297,8 @@
   # we don't want to be trying to connect to adbconnection which might not have
   # been built.
   vm_args="${vm_args} --vm-arg -XjdwpProvider:none"
-  # Make sure the debuggee doesn't clean up what the debugger has generated.
-  art_debugee="$art_debugee --no-clean"
+  # Make sure the debuggee doesn't re-generate, nor clean up what the debugger has generated.
+  art_debugee="$art_debugee --no-compile --no-clean"
 fi
 
 function jlib_name {
@@ -305,22 +342,31 @@
 
 if [[ "$image" != "" ]]; then
   vm_args="$vm_args --vm-arg $image"
+  debuggee_args="$debuggee_args $image"
+fi
+if [[ "$boot_classpath" != "" ]]; then
+  vm_args="$vm_args --vm-arg -Xbootclasspath:${boot_classpath}"
+  debuggee_args="$debuggee_args -Xbootclasspath:${boot_classpath}"
+fi
+if [[ "$boot_classpath_locations" != "" ]]; then
+  vm_args="$vm_args --vm-arg -Xbootclasspath-locations:${boot_classpath_locations}"
+  debuggee_args="$debuggee_args -Xbootclasspath-locations:${boot_classpath_locations}"
 fi
 
 if [[ "$plugin" != "" ]]; then
   vm_args="$vm_args --vm-arg $plugin"
 fi
 
-# Because we're running debuggable, we discard any AOT code.
-# Therefore we run de2oat with 'quicken' to avoid spending time compiling.
-vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
-debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
-
-if $instant_jit; then
-  debuggee_args="$debuggee_args -Xjitthreshold:0"
-fi
-
 if [[ $mode != "ri" ]]; then
+  # Because we're running debuggable, we discard any AOT code.
+  # Therefore we run dex2oat with 'quicken' to avoid spending time compiling.
+  vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
+  debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
+
+  if $instant_jit; then
+    debuggee_args="$debuggee_args -Xjitthreshold:0"
+  fi
+
   vm_args="$vm_args --vm-arg -Xusejit:$use_jit"
   debuggee_args="$debuggee_args -Xusejit:$use_jit"
 fi
@@ -363,7 +409,7 @@
       --vm-arg -Djpda.settings.waitingTime=$jdwp_test_timeout \
       --vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
       --vm-arg -Djpda.settings.dumpProcess="$dump_command" \
-      --vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $plugin $image $debuggee_args" \
+      --vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $plugin $debuggee_args" \
       --classpath "$test_jar" \
       $toolchain_args \
       $test
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 2d39b2a..735549e 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -46,6 +46,22 @@
   done
 }
 
+function boot_classpath_arg {
+  local dir="$1"
+  local suffix="$2"
+  shift 2
+  printf -- "--vm-arg -Xbootclasspath"
+  for var
+  do
+    printf -- ":${dir}/${var}${suffix}.jar";
+  done
+}
+
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+
 DEPS="core-tests jsr166-tests mockito-target"
 
 for lib in $DEPS
@@ -109,7 +125,11 @@
 while true; do
   if [[ "$1" == "--mode=device" ]]; then
     device_mode=true
+    # Remove the --mode=device from the arguments and replace it with --mode=device_testdex
+    vogar_args=${vogar_args/$1}
+    vogar_args="$vogar_args --mode=device_testdex"
     vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
+    vogar_args="$vogar_args $(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
     shift
   elif [[ "$1" == "--mode=host" ]]; then
     # We explicitly give a wrong path for the image, to ensure vogar
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 04e80df..92b3672 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -43,7 +43,7 @@
 # Kill logd first, so that when we set the adb buffer size later in this file,
 # it is brought up again.
 echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
-adb shell killall -9 /system/bin/logd
+adb shell pkill -9 -U logd logd && echo -e "${green}...logd killed${nc}"
 
 # Update date on device if the difference with host is more than one hour.
 if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
@@ -165,4 +165,9 @@
   adb shell mkdir -p "$ART_TEST_CHROOT/dev"
   adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/dev type tmpfs " \
     || adb shell mount -o bind /dev "$ART_TEST_CHROOT/dev"
+
+  # Create /apex tmpfs in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/apex"
+  adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/apex type tmpfs " \
+    || adb shell mount -t tmpfs -o nodev,noexec,nosuid tmpfs "$ART_TEST_CHROOT/apex"
 fi
diff --git a/tools/teardown-buildbot-device.sh b/tools/teardown-buildbot-device.sh
index 6634fb4..7eb5cc3 100755
--- a/tools/teardown-buildbot-device.sh
+++ b/tools/teardown-buildbot-device.sh
@@ -89,6 +89,9 @@
            fi
     }
 
+    # Remove /apex from chroot.
+    remove_filesystem_from_chroot apex tmpfs true
+
     # Remove /dev from chroot.
     remove_filesystem_from_chroot dev tmpfs true
 
diff --git a/tools/ti-fast/README.md b/tools/ti-fast/README.md
index bc46882..a0a7dd7 100644
--- a/tools/ti-fast/README.md
+++ b/tools/ti-fast/README.md
@@ -21,6 +21,10 @@
   called. This behavior is static. The no-log methods have no branches and just
   immediately return.
 
+* If 'all' is one of the arguments, all events the current runtime is capable
+  of providing will be listened for, and all other arguments (except 'log')
+  will be ignored.
+
 * The event-names are the same names as are used in the jvmtiEventCallbacks
   struct.
 
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
index b147add..d02e549 100644
--- a/tools/ti-fast/tifast.cc
+++ b/tools/ti-fast/tifast.cc
@@ -36,6 +36,13 @@
 // env.
 static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
 
+template <typename ...Args> static void Unused(Args... args ATTRIBUTE_UNUSED) {}
+
+// jthread is a typedef of jobject so we use this to allow the templates to distinguish them.
+struct jthreadContainer { jthread thread; };
+// jlocation is a typedef of jlong so use this to distinguish the less common jlong.
+struct jlongContainer { jlong val; };
+
 static void AddCapsForEvent(jvmtiEvent event, jvmtiCapabilities* caps) {
   switch (event) {
 #define DO_CASE(name, cap_name) \
@@ -63,59 +70,520 @@
 }
 
 // Setup for all supported events. Give a macro with fun(name, event_num, args)
-#define FOR_ALL_SUPPORTED_EVENTS(fun) \
-    fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation)) \
-    fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv*, JNIEnv*, jthread, jmethodID)) \
-    fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jboolean, jvalue)) \
-    fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, void*, void**)) \
-    fun(Exception, EVENT(EXCEPTION), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject, jmethodID, jlocation)) \
-    fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject)) \
-    fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv*, JNIEnv*, jthread)) \
-    fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv*, JNIEnv*, jthread)) \
-    fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
-    fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
-    fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv*, JNIEnv*, jclass, jobject, const char*, jobject, jint, const unsigned char*, jint*, unsigned char**)) \
-    fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv*, jmethodID, jint, const void*, jint, const jvmtiAddrLocationMap*, const void*)) \
-    fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv*, jmethodID, const void*)) \
-    fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv*, const char*, const void*, jint)) \
-    fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv*)) \
-    fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
-    fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
-    fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv*, JNIEnv*, jthread, jobject, jlong)) \
-    fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv*, JNIEnv*, jthread, jobject, jboolean)) \
-    fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv*, JNIEnv*, jint, const void*, const char*)) \
-    fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv*, JNIEnv*, jthread, jobject, jclass, jlong)) \
-    fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv*)) \
-    fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv*))
+#define FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
+    fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc)) \
+    fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth), (jvmti, jni, jthreadContainer{.thread = thread}, meth)) \
+    fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jboolean jb, jvalue jv), (jvmti, jni, jthreadContainer{.thread = thread}, meth, jb, jv)) \
+    fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, void* v1, void** v2), (jvmti, jni, jthreadContainer{.thread = thread}, meth, v1, v2)) \
+    fun(Exception, EVENT(EXCEPTION), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth1, jlocation loc1, jobject obj, jmethodID meth2, jlocation loc2), (jvmti, jni, jthreadContainer{.thread = thread}, meth1, loc1, obj, meth2, loc2)) \
+    fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc, obj)) \
+    fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+    fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+    fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass) ) \
+    fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass)) \
+    fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, jobject obj1, const char* c1, jobject obj2, jint i1, const unsigned char* c2, jint* ip1, unsigned char** cp1), (jvmti, jni, klass, obj1, c1, obj2, i1, c2, ip1, cp1)) \
+    fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+    fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+    fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, jlongContainer{.val = l1})) \
+    fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jboolean b1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, b1)) \
+    fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv* jvmti, JNIEnv* jni, jint i1, const void* cv, const char* cc), (jvmti, jni, i1, cv, cc)) \
+    fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jclass klass, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, klass, jlongContainer{.val = l1})) \
 
-#define GENERATE_EMPTY_FUNCTION(name, number, args) \
-    static void JNICALL empty ## name  args { }
+#define FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun) \
+    fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv* jvmti, jmethodID meth, jint i1, const void* cv1, jint i2, const jvmtiAddrLocationMap* alm, const void* cv2), (jvmti, meth, i1, cv1, i2, alm, cv2)) \
+    fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv* jvmti, jmethodID meth, const void* cv1), (jvmti, meth, cv1)) \
+    fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv* jvmti, const char* cc, const void* cv, jint i1), (jvmti, cc, cv, i1)) \
+    fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv* jvmti), (jvmti)) \
+    fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv* jvmti), (jvmti)) \
+    fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv* jvmti), (jvmti))
+
+#define FOR_ALL_SUPPORTED_EVENTS(fun) \
+    FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
+    FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun)
+
+static const jvmtiEvent kAllEvents[] = {
+#define GET_EVENT(a, event, b, c) event,
+FOR_ALL_SUPPORTED_EVENTS(GET_EVENT)
+#undef GET_EVENT
+};
+
+#define GENERATE_EMPTY_FUNCTION(name, number, args, argnames) \
+    static void JNICALL empty ## name  args { Unused argnames ; }
 FOR_ALL_SUPPORTED_EVENTS(GENERATE_EMPTY_FUNCTION)
 #undef GENERATE_EMPTY_FUNCTION
 
 static jvmtiEventCallbacks kEmptyCallbacks {
-#define CREATE_EMPTY_EVENT_CALLBACKS(name, num, args) \
+#define CREATE_EMPTY_EVENT_CALLBACKS(name, num, args, argnames) \
     .name = empty ## name,
   FOR_ALL_SUPPORTED_EVENTS(CREATE_EMPTY_EVENT_CALLBACKS)
 #undef CREATE_EMPTY_EVENT_CALLBACKS
 };
 
-#define GENERATE_LOG_FUNCTION(name, number, args) \
-    static void JNICALL log ## name  args { \
-      LOG(INFO) << "Got event " << #name ; \
+static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+  if (obj != nullptr && env != nullptr) {
+    env->DeleteLocalRef(obj);
+  }
+}
+
+class ScopedThreadInfo {
+ public:
+  ScopedThreadInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jthread thread)
+      : jvmtienv_(jvmtienv), env_(env), free_name_(false) {
+    if (thread == nullptr) {
+      info_.name = const_cast<char*>("<NULLPTR>");
+    } else if (jvmtienv->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+      info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+    } else {
+      free_name_ = true;
     }
-FOR_ALL_SUPPORTED_EVENTS(GENERATE_LOG_FUNCTION)
+  }
+
+  ~ScopedThreadInfo() {
+    if (free_name_) {
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+    }
+    DeleteLocalRef(env_, info_.thread_group);
+    DeleteLocalRef(env_, info_.context_class_loader);
+  }
+
+  const char* GetName() const {
+    return info_.name;
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  JNIEnv* env_;
+  bool free_name_;
+  jvmtiThreadInfo info_{};
+};
+
+class ScopedClassInfo {
+ public:
+  ScopedClassInfo(jvmtiEnv* jvmtienv, jclass c) : jvmtienv_(jvmtienv), class_(c) {}
+
+  ~ScopedClassInfo() {
+    if (class_ != nullptr) {
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+    }
+  }
+
+  bool Init(bool get_generic = true) {
+    if (class_ == nullptr) {
+      name_ = const_cast<char*>("<NONE>");
+      generic_ = const_cast<char*>("<NONE>");
+      return true;
+    } else {
+      jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
+      jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
+      char** gen_ptr = &generic_;
+      if (!get_generic) {
+        generic_ = nullptr;
+        gen_ptr = nullptr;
+      }
+      return jvmtienv_->GetClassSignature(class_, &name_, gen_ptr) == JVMTI_ERROR_NONE &&
+          ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+          ret1 != JVMTI_ERROR_INVALID_CLASS &&
+          ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+          ret2 != JVMTI_ERROR_INVALID_CLASS;
+    }
+  }
+
+  jclass GetClass() const {
+    return class_;
+  }
+
+  const char* GetName() const {
+    return name_;
+  }
+
+  const char* GetGeneric() const {
+    return generic_;
+  }
+
+  const char* GetSourceDebugExtension() const {
+    if (debug_ext_ == nullptr) {
+      return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+    } else {
+      return debug_ext_;
+    }
+  }
+  const char* GetSourceFileName() const {
+    if (file_ == nullptr) {
+      return "<UNKNOWN_FILE>";
+    } else {
+      return file_;
+    }
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  jclass class_;
+  char* name_ = nullptr;
+  char* generic_ = nullptr;
+  char* file_ = nullptr;
+  char* debug_ext_ = nullptr;
+
+  friend std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& m);
+};
+
+class ScopedMethodInfo {
+ public:
+  ScopedMethodInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jmethodID m)
+      : jvmtienv_(jvmtienv), env_(env), method_(m) {}
+
+  ~ScopedMethodInfo() {
+    DeleteLocalRef(env_, declaring_class_);
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+  }
+
+  bool Init(bool get_generic = true) {
+    if (jvmtienv_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+      return false;
+    }
+    class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+    jint nlines;
+    jvmtiLineNumberEntry* lines;
+    jvmtiError err = jvmtienv_->GetLineNumberTable(method_, &nlines, &lines);
+    if (err == JVMTI_ERROR_NONE) {
+      if (nlines > 0) {
+        first_line_ = lines[0].line_number;
+      }
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+    } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+               err != JVMTI_ERROR_NATIVE_METHOD) {
+      return false;
+    }
+    return class_info_->Init(get_generic) &&
+        (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+  }
+
+  const ScopedClassInfo& GetDeclaringClassInfo() const {
+    return *class_info_;
+  }
+
+  jclass GetDeclaringClass() const {
+    return declaring_class_;
+  }
+
+  const char* GetName() const {
+    return name_;
+  }
+
+  const char* GetSignature() const {
+    return signature_;
+  }
+
+  const char* GetGeneric() const {
+    return generic_;
+  }
+
+  jint GetFirstLine() const {
+    return first_line_;
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  JNIEnv* env_;
+  jmethodID method_;
+  jclass declaring_class_ = nullptr;
+  std::unique_ptr<ScopedClassInfo> class_info_;
+  char* name_ = nullptr;
+  char* signature_ = nullptr;
+  char* generic_ = nullptr;
+  jint first_line_ = -1;
+
+  friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
+};
+
+std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& c) {
+  const char* generic = c.GetGeneric();
+  if (generic != nullptr) {
+    return os << c.GetName() << "<" << generic << ">" << " file: " << c.GetSourceFileName();
+  } else {
+    return os << c.GetName() << " file: " << c.GetSourceFileName();
+  }
+}
+
+std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m) {
+  return os << m.GetDeclaringClassInfo().GetName() << "->" << m.GetName() << m.GetSignature()
+            << " (source: " << m.GetDeclaringClassInfo().GetSourceFileName() << ":"
+            << m.GetFirstLine() << ")";
+}
+
+
+class LogPrinter {
+ public:
+  explicit LogPrinter(jvmtiEvent event) : event_(event) {}
+
+  template <typename ...Args> void PrintRestNoJNI(jvmtiEnv* jvmti, Args... args) {
+    PrintRest(jvmti, static_cast<JNIEnv*>(nullptr), args...);
+  }
+
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti, JNIEnv* env, Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jlongContainer l,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jthreadContainer thr,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jboolean i,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jint i,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jclass klass,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jmethodID meth,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jlocation loc,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jint* ip,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             const void* loc,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             void* loc,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             void** loc,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             unsigned char** v,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             const unsigned char* v,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             const char* v,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             const jvmtiAddrLocationMap* v,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jvalue v,
+                                             Args... args);
+  template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+                                             JNIEnv* env,
+                                             jobject v,
+                                             Args... args);
+
+  std::string GetResult() {
+    std::string out_str = stream.str();
+    return start_args + out_str;
+  }
+
+ private:
+  jvmtiEvent event_;
+  std::string start_args;
+  std::ostringstream stream;
+};
+
+// Base case
+template<> void LogPrinter::PrintRest(jvmtiEnv* jvmti ATTRIBUTE_UNUSED, JNIEnv* jni) {
+  if (jni == nullptr) {
+    start_args = "jvmtiEnv*";
+  } else {
+    start_args = "jvmtiEnv*, JNIEnv*";
+  }
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti,
+                           JNIEnv* jni,
+                           const jvmtiAddrLocationMap* v,
+                           Args... args) {
+  if (v != nullptr) {
+    stream << ", const jvmtiAddrLocationMap*[start_address: "
+           << v->start_address << ", location: " << v->location << "]";
+  } else {
+    stream << ", const jvmtiAddrLocationMap*[nullptr]";
+  }
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jint* v, Args... args) {
+  stream << ", jint*[" << static_cast<const void*>(v) << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const void* v, Args... args) {
+  stream << ", const void*[" << v << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, unsigned char** v, Args... args) {
+  stream << ", unsigned char**[" << static_cast<const void*>(v) << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const unsigned char* v, Args... args) {
+  stream << ", const unsigned char*[" << static_cast<const void*>(v) << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const char* v, Args... args) {
+  stream << ", const char*[" << v << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jvalue v ATTRIBUTE_UNUSED, Args... args) {
+  stream << ", jvalue[<UNION>]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, void** v, Args... args) {
+  stream << ", void**[" << v << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, void* v, Args... args) {
+  stream << ", void*[" << v << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jlongContainer l, Args... args) {
+  stream << ", jlong[" << l.val << ", hex: 0x" << std::hex << l.val << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jlocation l, Args... args) {
+  stream << ", jlocation[" << l << ", hex: 0x" << std::hex << l << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jboolean b, Args... args) {
+  stream << ", jboolean[" << (b ? "true" : "false") << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jint i, Args... args) {
+  stream << ", jint[" << i << ", hex: 0x" << std::hex << i << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jobject obj, Args... args) {
+  if (obj == nullptr) {
+    stream << ", jobject[nullptr]";
+  } else {
+    jni->PushLocalFrame(1);
+    jclass klass = jni->GetObjectClass(obj);
+    ScopedClassInfo sci(jvmti, klass);
+    if (sci.Init(event_ != JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+      stream << ", jobject[type: " << sci << "]";
+    } else {
+      stream << ", jobject[type: TYPE UNKNOWN]";
+    }
+    jni->PopLocalFrame(nullptr);
+  }
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jthreadContainer thr, Args... args) {
+  ScopedThreadInfo sti(jvmti, jni, thr.thread);
+  stream << ", jthread[" << sti.GetName() << "]";
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, Args... args) {
+  ScopedClassInfo sci(jvmti, klass);
+  if (sci.Init(/*get_generic=*/event_ != JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+    stream << ", jclass[" << sci << "]";
+  } else {
+    stream << ", jclass[TYPE UNKNOWN]";
+  }
+  PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jmethodID meth, Args... args) {
+  ScopedMethodInfo smi(jvmti, jni, meth);
+  if (smi.Init()) {
+    stream << ", jmethodID[" << smi << "]";
+  } else {
+    stream << ", jmethodID[METHOD UNKNOWN]";
+  }
+  PrintRest(jvmti, jni, args...);
+}
+
+#define GENERATE_LOG_FUNCTION_JNI(name, event, args, argnames) \
+    static void JNICALL log ## name  args { \
+      LogPrinter printer(event); \
+      printer.PrintRest argnames; \
+      LOG(INFO) << "Got event " << #name << "(" << printer.GetResult() << ")"; \
+    } \
+
+#define GENERATE_LOG_FUNCTION_NO_JNI(name, event, args, argnames) \
+    static void JNICALL log ## name  args { \
+      LogPrinter printer(event); \
+      printer.PrintRestNoJNI argnames; \
+      LOG(INFO) << "Got event " << #name << "(" << printer.GetResult() << ")"; \
+    } \
+
+FOR_ALL_SUPPORTED_JNI_EVENTS(GENERATE_LOG_FUNCTION_JNI)
+FOR_ALL_SUPPORTED_NO_JNI_EVENTS(GENERATE_LOG_FUNCTION_NO_JNI)
 #undef GENERATE_LOG_FUNCTION
 
 static jvmtiEventCallbacks kLogCallbacks {
-#define CREATE_LOG_EVENT_CALLBACK(name, num, args) \
+#define CREATE_LOG_EVENT_CALLBACK(name, num, args, argnames) \
     .name = log ## name,
   FOR_ALL_SUPPORTED_EVENTS(CREATE_LOG_EVENT_CALLBACK)
 #undef CREATE_LOG_EVENT_CALLBACK
 };
 
+static std::string EventToName(jvmtiEvent desired_event) {
+#define CHECK_NAME(name, event, args, argnames) \
+  if (desired_event == (event)) { \
+    return #name; \
+  }
+  FOR_ALL_SUPPORTED_EVENTS(CHECK_NAME);
+  LOG(FATAL) << "Unknown event " << desired_event;
+  __builtin_unreachable();
+#undef CHECK_NAME
+}
 static jvmtiEvent NameToEvent(const std::string& desired_name) {
-#define CHECK_NAME(name, event, args) \
+#define CHECK_NAME(name, event, args, argnames) \
   if (desired_name == #name) { \
     return event; \
   }
@@ -125,14 +593,46 @@
 #undef CHECK_NAME
 }
 
+#undef FOR_ALL_SUPPORTED_JNI_EVENTS
+#undef FOR_ALL_SUPPORTED_NO_JNI_EVENTS
 #undef FOR_ALL_SUPPORTED_EVENTS
-static std::vector<jvmtiEvent> GetRequestedEventList(const std::string& args) {
+
+static std::vector<jvmtiEvent> GetAllAvailableEvents(jvmtiEnv* jvmti) {
+  std::vector<jvmtiEvent> out;
+  jvmtiCapabilities caps{};
+  jvmti->GetPotentialCapabilities(&caps);
+  uint8_t caps_bytes[sizeof(caps)];
+  memcpy(caps_bytes, &caps, sizeof(caps));
+  for (jvmtiEvent e : kAllEvents) {
+    jvmtiCapabilities req{};
+    AddCapsForEvent(e, &req);
+    uint8_t req_bytes[sizeof(req)];
+    memcpy(req_bytes, &req, sizeof(req));
+    bool good = true;
+    for (size_t i = 0; i < sizeof(caps); i++) {
+      if ((req_bytes[i] & caps_bytes[i]) != req_bytes[i]) {
+        good = false;
+        break;
+      }
+    }
+    if (good) {
+      out.push_back(e);
+    } else {
+      LOG(WARNING) << "Unable to get capabilities for event " << EventToName(e);
+    }
+  }
+  return out;
+}
+
+static std::vector<jvmtiEvent> GetRequestedEventList(jvmtiEnv* jvmti, const std::string& args) {
   std::vector<jvmtiEvent> res;
   std::stringstream args_stream(args);
   std::string item;
   while (std::getline(args_stream, item, ',')) {
     if (item == "") {
       continue;
+    } else if (item == "all") {
+      return GetAllAvailableEvents(jvmti);
     }
     res.push_back(NameToEvent(item));
   }
@@ -168,12 +668,17 @@
     args = args.substr(3);
   }
 
-  std::vector<jvmtiEvent> events = GetRequestedEventList(args);
+  std::vector<jvmtiEvent> events = GetRequestedEventList(jvmti, args);
 
   jvmtiCapabilities caps{};
   for (jvmtiEvent e : events) {
     AddCapsForEvent(e, &caps);
   }
+  if (is_log) {
+    caps.can_get_line_numbers = 1;
+    caps.can_get_source_file_name = 1;
+    caps.can_get_source_debug_extension = 1;
+  }
   error = jvmti->AddCapabilities(&caps);
   if (error != JVMTI_ERROR_NONE) {
     LOG(ERROR) << "Unable to set caps";
diff --git a/tools/timeout_dumper/Android.bp b/tools/timeout_dumper/Android.bp
new file mode 100644
index 0000000..bb813d4
--- /dev/null
+++ b/tools/timeout_dumper/Android.bp
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_binary {
+    name: "timeout_dumper",
+
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+        linux_bionic: {
+            sanitize: {
+                address: false,
+            },
+        },
+    },
+    device_supported: false,
+
+    defaults: ["art_defaults"],
+
+    srcs: ["timeout_dumper.cc"],
+
+    shared_libs: [
+        "libbacktrace",
+        "libbase",
+    ],
+    sanitize: {
+        address: true,
+    },
+}
diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc
new file mode 100644
index 0000000..08d2f4c
--- /dev/null
+++ b/tools/timeout_dumper/timeout_dumper.cc
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <csignal>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <thread>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
+namespace art {
+namespace {
+
+using android::base::StringPrintf;
+using android::base::unique_fd;
+
+constexpr bool kUseAddr2line = true;
+
+namespace timeout_signal {
+
+class SignalSet {
+ public:
+  SignalSet() {
+    if (sigemptyset(&set_) == -1) {
+      PLOG(FATAL) << "sigemptyset failed";
+    }
+  }
+
+  void Add(int signal) {
+    if (sigaddset(&set_, signal) == -1) {
+      PLOG(FATAL) << "sigaddset " << signal << " failed";
+    }
+  }
+
+  void Block() {
+    if (pthread_sigmask(SIG_BLOCK, &set_, nullptr) != 0) {
+      PLOG(FATAL) << "pthread_sigmask failed";
+    }
+  }
+
+  int Wait() {
+    // Sleep in sigwait() until a signal arrives. gdb causes EINTR failures.
+    int signal_number;
+    int rc = TEMP_FAILURE_RETRY(sigwait(&set_, &signal_number));
+    if (rc != 0) {
+      PLOG(FATAL) << "sigwait failed";
+    }
+    return signal_number;
+  }
+
+ private:
+  sigset_t set_;
+};
+
// The real-time signal this watchdog listens for; chosen above SIGRTMIN to
// stay clear of signals reserved by the C library.
int GetTimeoutSignal() {
  constexpr int kOffsetFromRtMin = 2;
  return SIGRTMIN + kOffsetFromRtMin;
}
+
+}  // namespace timeout_signal
+
+namespace addr2line {
+
+constexpr const char* kAddr2linePath =
+    "/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8/bin/x86_64-linux-addr2line";
+
+std::unique_ptr<std::string> FindAddr2line() {
+  const char* env_value = getenv("ANDROID_BUILD_TOP");
+  if (env_value != nullptr) {
+    std::string path = std::string(env_value) + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  {
+    std::string path = std::string(".") + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  {
+    using android::base::Dirname;
+
+    std::string exec_dir = android::base::GetExecutableDirectory();
+    std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir))));
+    std::string path = derived_top + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
+  if (access(kHostAddr2line, F_OK) == 0) {
+    return std::make_unique<std::string>(kHostAddr2line);
+  }
+
+  return nullptr;
+}
+
+// The state of an open pipe to addr2line. In "server" mode, addr2line takes input on stdin
+// and prints the result to stdout. This struct keeps the state of the open connection.
+struct Addr2linePipe {
+  Addr2linePipe(int in_fd, int out_fd, const std::string& file_name, pid_t pid)
+      : in(in_fd), out(out_fd), file(file_name), child_pid(pid), odd(true) {}
+
+  ~Addr2linePipe() {
+    kill(child_pid, SIGKILL);
+  }
+
+  unique_fd in;      // The file descriptor that is connected to the output of addr2line.
+  unique_fd out;     // The file descriptor that is connected to the input of addr2line.
+
+  const std::string file;     // The file addr2line is working on, so that we know when to close
+                              // and restart.
+  const pid_t child_pid;      // The pid of the child, which we should kill when we're done.
+  bool odd;                   // Print state for indentation of lines.
+};
+
// Spawns addr2line (args[0], argv = args) connected via two pipes: writes by
// the caller reach the child's stdin, and the child's stdout is readable by
// the caller. Returns null on any pipe/fork failure, closing every fd opened
// so far.
std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const char* args[]) {
  int caller_to_addr2line[2];
  int addr2line_to_caller[2];

  if (pipe(caller_to_addr2line) == -1) {
    return nullptr;
  }
  if (pipe(addr2line_to_caller) == -1) {
    // Unwind the first pipe before bailing.
    close(caller_to_addr2line[0]);
    close(caller_to_addr2line[1]);
    return nullptr;
  }

  pid_t pid = fork();
  if (pid == -1) {
    close(caller_to_addr2line[0]);
    close(caller_to_addr2line[1]);
    close(addr2line_to_caller[0]);
    close(addr2line_to_caller[1]);
    return nullptr;
  }

  if (pid == 0) {
    // Child: wire the pipe ends onto stdin/stdout, close the originals (the
    // dup2'd descriptors stay open), and exec addr2line.
    dup2(caller_to_addr2line[0], STDIN_FILENO);
    dup2(addr2line_to_caller[1], STDOUT_FILENO);

    close(caller_to_addr2line[0]);
    close(caller_to_addr2line[1]);
    close(addr2line_to_caller[0]);
    close(addr2line_to_caller[1]);

    execv(args[0], const_cast<char* const*>(args));
    // Only reached if exec failed.
    exit(1);
  } else {
    // Parent: keep the read end of the child's stdout and the write end of
    // the child's stdin; close the two ends now owned by the child.
    close(caller_to_addr2line[0]);
    close(addr2line_to_caller[1]);
    return std::make_unique<Addr2linePipe>(addr2line_to_caller[0],
                                           caller_to_addr2line[1],
                                           name,
                                           pid);
  }
}
+
// Writes the optional |prefix| followed by the indentation for the current
// line: two spaces, plus one extra space on "even" lines so that the two
// addr2line output lines per frame (function, file:line) are visually paired.
void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
  std::string pad = "  ";
  if (!odd) {
    pad += " ";
  }
  if (prefix != nullptr) {
    os << prefix;
  }
  os << pad;
}
+
// Reads addr2line's replies from |pipe| and echoes them, line by line and
// with |prefix| indentation, to |os|. |expected| is the number of output
// lines still anticipated; while it is non-zero we poll with a longer
// timeout. On poll/read errors or child exit the pipe is reset (closed);
// on a quiet timeout we simply return.
void Drain(size_t expected,
           const char* prefix,
           std::unique_ptr<Addr2linePipe>* pipe /* inout */,
           std::ostream& os) {
  DCHECK(pipe != nullptr);
  DCHECK(pipe->get() != nullptr);
  int in = pipe->get()->in.get();
  DCHECK_GE(in, 0);

  bool prefix_written = false;

  for (;;) {
    constexpr uint32_t kWaitTimeExpectedMilli = 500;
    constexpr uint32_t kWaitTimeUnexpectedMilli = 50;

    // Wait longer while output is still expected; otherwise drain quickly.
    int timeout = expected > 0 ? kWaitTimeExpectedMilli : kWaitTimeUnexpectedMilli;
    struct pollfd read_fd{in, POLLIN, 0};
    int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout));
    if (retval == -1) {
      // An error occurred.
      pipe->reset();
      return;
    }

    if (retval == 0) {
      // Timeout.
      return;
    }

    if (!(read_fd.revents & POLLIN)) {
      // addr2line call exited.
      pipe->reset();
      return;
    }

    constexpr size_t kMaxBuffer = 128;  // Relatively small buffer. Should be OK as we're on an
                                        // alt stack, but just to be sure...
    char buffer[kMaxBuffer];
    memset(buffer, 0, kMaxBuffer);
    int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
    if (bytes_read <= 0) {
      // This should not really happen...
      pipe->reset();
      return;
    }
    buffer[bytes_read] = '\0';

    // Copy the chunk out line by line, writing the prefix at the start of
    // each new line and toggling the indentation parity per completed line.
    char* tmp = buffer;
    while (*tmp != 0) {
      if (!prefix_written) {
        WritePrefix(os, prefix, (*pipe)->odd);
        prefix_written = true;
      }
      char* new_line = strchr(tmp, '\n');
      if (new_line == nullptr) {
        // Incomplete line: emit what we have and poll for the rest.
        os << tmp;

        break;
      } else {
        os << std::string(tmp, new_line - tmp + 1);

        tmp = new_line + 1;
        prefix_written = false;
        (*pipe)->odd = !(*pipe)->odd;

        if (expected > 0) {
          expected--;
        }
      }
    }
  }
}
+
// Symbolizes |offset| within |map_src| by feeding the hex address to a
// (cached) addr2line child and draining the two expected output lines
// (function name, file:line) into |os|. Restarts the child whenever the
// mapped file changes; skips files addr2line cannot handle.
void Addr2line(const std::string& addr2line,
               const std::string& map_src,
               uintptr_t offset,
               std::ostream& os,
               const char* prefix,
               std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
  DCHECK(pipe != nullptr);

  if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
    // addr2line will not work on the vdso.
    // vdex files are special frames injected for the interpreter
    // so they don't have any line number information available.
    return;
  }

  if (*pipe == nullptr || (*pipe)->file != map_src) {
    if (*pipe != nullptr) {
      // Flush whatever output is pending for the previous file first.
      Drain(0, prefix, pipe, os);
    }
    pipe->reset();  // Close early.

    const char* args[] = {
        addr2line.c_str(),
        "--functions",
        "--inlines",
        "--demangle",
        "-e",
        map_src.c_str(),
        nullptr
    };
    *pipe = Connect(map_src, args);
  }

  Addr2linePipe* pipe_ptr = pipe->get();
  if (pipe_ptr == nullptr) {
    // Failed...
    return;
  }

  // Send the offset.
  const std::string hex_offset = StringPrintf("%zx\n", offset);

  if (!android::base::WriteFully(pipe_ptr->out.get(), hex_offset.data(), hex_offset.length())) {
    // Error. :-(
    pipe->reset();
    return;
  }

  // Now drain (expecting two lines).
  Drain(2U, prefix, pipe, os);
}
+
+}  // namespace addr2line
+
+namespace ptrace {
+
+std::set<pid_t> PtraceSiblings(pid_t pid) {
+  std::set<pid_t> ret;
+  std::string task_path = android::base::StringPrintf("/proc/%d/task", pid);
+
+  std::unique_ptr<DIR, int (*)(DIR*)> d(opendir(task_path.c_str()), closedir);
+
+  // Bail early if the task directory cannot be opened.
+  if (d == nullptr) {
+    PLOG(ERROR) << "Failed to scan task folder";
+    return ret;
+  }
+
+  struct dirent* de;
+  while ((de = readdir(d.get())) != nullptr) {
+    // Ignore "." and "..".
+    if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
+      continue;
+    }
+
+    char* end;
+    pid_t tid = strtoul(de->d_name, &end, 10);
+    if (*end) {
+      continue;
+    }
+
+    if (tid == pid) {
+      continue;
+    }
+
+    if (::ptrace(PTRACE_ATTACH, tid, 0, 0) != 0) {
+      PLOG(ERROR) << "Failed to attach to tid " << tid;
+      continue;
+    }
+
+    ret.insert(tid);
+  }
+  return ret;
+}
+
// Prints the ABI of |forked_pid| to stderr. Reads the NT_PRSTATUS register
// set via PTRACE_GETREGSET and uses its size to distinguish the 32-bit from
// the 64-bit flavor of this tool's own architecture; falls back to the
// 64-bit flavor if the registers cannot be read.
void DumpABI(pid_t forked_pid) {
  enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
#if defined(__arm__)
  constexpr ABI kDumperABI = ABI::kArm;
#elif defined(__aarch64__)
  constexpr ABI kDumperABI = ABI::kArm64;
#elif defined(__mips__) && !defined(__LP64__)
  constexpr ABI kDumperABI = ABI::kMips;
#elif defined(__mips__) && defined(__LP64__)
  constexpr ABI kDumperABI = ABI::kMips64;
#elif defined(__i386__)
  constexpr ABI kDumperABI = ABI::kX86;
#elif defined(__x86_64__)
  constexpr ABI kDumperABI = ABI::kX86_64;
#else
#error Unsupported architecture
#endif

  char data[1024];  // Should be more than enough.
  struct iovec io_vec;
  io_vec.iov_base = &data;
  io_vec.iov_len = 1024;
  ABI to_print;
  if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) {
    LOG(ERROR) << "Could not get registers to determine abi.";
    // Use 64-bit as default.
    switch (kDumperABI) {
      case ABI::kArm:
      case ABI::kArm64:
        to_print = ABI::kArm64;
        break;
      case ABI::kMips:
      case ABI::kMips64:
        to_print = ABI::kMips64;
        break;
      case ABI::kX86:
      case ABI::kX86_64:
        to_print = ABI::kX86_64;
        break;
      default:
        __builtin_unreachable();
    }
  } else {
    // Check the length of the data. Assume that it's the same arch as the tool.
    switch (kDumperABI) {
      case ABI::kArm:
      case ABI::kArm64:
        // 18 32-bit registers => 32-bit arm; anything else is treated as arm64.
        to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
        break;
      case ABI::kMips:
      case ABI::kMips64:
        to_print = ABI::kMips64;  // TODO Figure out how this should work.
        break;
      case ABI::kX86:
      case ABI::kX86_64:
        // 17 32-bit registers => x86; anything else is treated as x86_64.
        to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
        break;
      default:
        __builtin_unreachable();
    }
  }
  std::string abi_str;
  switch (to_print) {
    case ABI::kArm:
      abi_str = "arm";
      break;
    case ABI::kArm64:
      abi_str = "arm64";
      break;
    case ABI::kMips:
      abi_str = "mips";
      break;
    case ABI::kMips64:
      abi_str = "mips64";
      break;
    case ABI::kX86:
      abi_str = "x86";
      break;
    case ABI::kX86_64:
      abi_str = "x86_64";
      break;
  }
  std::cerr << "ABI: '" << abi_str << "'" << std::endl;
}
+
+}  // namespace ptrace
+
// Polls |handler| every 10us for up to |max_wait_micros|. The handler
// returns true when it has produced a definitive answer (written through its
// bool* argument); WaitLoop then returns that answer. Returns false if the
// handler never concludes within the time budget.
template <typename T>
bool WaitLoop(uint32_t max_wait_micros, const T& handler) {
  constexpr uint32_t kPollIntervalMicros = 10;
  size_t attempts_left = max_wait_micros / kPollIntervalMicros;

  while (attempts_left-- > 0) {
    bool result;
    if (handler(&result)) {
      return result;
    }
    usleep(kPollIntervalMicros);
  }
  return false;
}
+
+bool WaitForMainSigStop(const std::atomic<bool>& saw_wif_stopped_for_main) {
+  auto handler = [&](bool* res) {
+    if (saw_wif_stopped_for_main) {
+      *res = true;
+      return true;
+    }
+    return false;
+  };
+  constexpr uint32_t kMaxWaitMicros = 30 * 1000 * 1000;  // 30s wait.
+  return WaitLoop(kMaxWaitMicros, handler);
+}
+
// Polls waitpid(WNOHANG) on |pid| for up to |max_wait_micros|. Returns true
// iff the process reported a WIFSTOPPED state change; false on waitpid
// failure, on a non-stop state change, or if nothing was reported in time.
bool WaitForSigStopped(pid_t pid, uint32_t max_wait_micros) {
  auto handler = [&](bool* res) {
    int status;
    pid_t rc = TEMP_FAILURE_RETRY(waitpid(pid, &status, WNOHANG));
    if (rc == -1) {
      PLOG(ERROR) << "Failed to waitpid for " << pid;
      *res = false;
      return true;
    }
    if (rc == pid) {
      if (!(WIFSTOPPED(status))) {
        LOG(ERROR) << "Did not get expected stopped signal for " << pid;
        *res = false;
      } else {
        *res = true;
      }
      return true;
    }
    // No state change reported yet; keep polling.
    return false;
  };
  return WaitLoop(max_wait_micros, handler);
}
+
+#ifdef __LP64__
+constexpr bool kIs64Bit = true;
+#else
+constexpr bool kIs64Bit = false;
+#endif
+
// Prints a symbolized native stack for thread |tid| of process |pid| to
// stderr. For secondary threads, first waits (bounded) for the thread's
// ptrace stop to be reported. Frames are formatted one per line; source
// locations are appended via addr2line when |addr2line_path| is available.
void DumpThread(pid_t pid,
                pid_t tid,
                const std::string* addr2line_path,
                const char* prefix,
                BacktraceMap* map) {
  // Use std::cerr to avoid the LOG prefix.
  std::cerr << std::endl << "=== pid: " << pid << " tid: " << tid << " ===" << std::endl;

  constexpr uint32_t kMaxWaitMicros = 1000 * 1000;  // 1s.
  if (pid != tid && !WaitForSigStopped(tid, kMaxWaitMicros)) {
    LOG(ERROR) << "Failed to wait for sigstop on " << tid;
  }

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map));
  if (backtrace == nullptr) {
    LOG(ERROR) << prefix << "(failed to create Backtrace for thread " << tid << ")";
    return;
  }
  backtrace->SetSkipFrames(false);
  if (!backtrace->Unwind(0, nullptr)) {
    LOG(ERROR) << prefix << "(backtrace::Unwind failed for thread " << tid
               << ": " <<  backtrace->GetErrorString(backtrace->GetError()) << ")";
    return;
  }
  if (backtrace->NumFrames() == 0) {
    LOG(ERROR) << prefix << "(no native stack frames for thread " << tid << ")";
    return;
  }

  // Lazily-created addr2line connection, reused across frames of this thread.
  std::unique_ptr<addr2line::Addr2linePipe> addr2line_state;

  for (Backtrace::const_iterator it = backtrace->begin();
      it != backtrace->end(); ++it) {
    // Build the frame line: "#NN pc <addr>  <map> (offset X) (func+off)".
    std::ostringstream oss;
    oss << prefix << StringPrintf("#%02zu pc ", it->num);
    bool try_addr2line = false;
    if (!BacktraceMap::IsValid(it->map)) {
      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  ???" : "%08" PRIx64 "  ???", it->pc);
    } else {
      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  " : "%08" PRIx64 "  ", it->rel_pc);
      if (it->map.name.empty()) {
        oss << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
      } else {
        oss << it->map.name;
      }
      if (it->map.offset != 0) {
        oss << StringPrintf(" (offset %" PRIx64 ")", it->map.offset);
      }
      oss << " (";
      if (!it->func_name.empty()) {
        oss << it->func_name;
        if (it->func_offset != 0) {
          oss << "+" << it->func_offset;
        }
        // Functions found using the gdb jit interface will be in an empty
        // map that cannot be found using addr2line.
        if (!it->map.name.empty()) {
          try_addr2line = true;
        }
      } else {
        oss << "???";
      }
      oss << ")";
    }
    std::cerr << oss.str() << std::endl;
    if (try_addr2line && addr2line_path != nullptr) {
      addr2line::Addr2line(*addr2line_path,
                           it->map.name,
                           it->rel_pc,
                           std::cerr,
                           prefix,
                           &addr2line_state);
    }
  }

  // Flush any addr2line output still in flight.
  if (addr2line_state != nullptr) {
    addr2line::Drain(0, prefix, &addr2line_state, std::cerr);
  }
}
+
// Attaches to |forked_pid| and all of its sibling threads, prints the ABI,
// and dumps a symbolized native stack for every thread. Does not detach:
// the caller kills the process immediately afterwards.
void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
  LOG(ERROR) << "Timeout for process " << forked_pid;

  CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
  std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
  tids.insert(forked_pid);

  ptrace::DumpABI(forked_pid);

  // Check whether we have and should use addr2line.
  std::unique_ptr<std::string> addr2line_path;
  if (kUseAddr2line) {
    addr2line_path = addr2line::FindAddr2line();
    if (addr2line_path == nullptr) {
      LOG(ERROR) << "Did not find usable addr2line";
    }
  }

  // Wait for the main wait loop to report the child's stop (it sets the flag
  // on WIFSTOPPED); proceed with a warning if that never happens.
  if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
    LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid;
  }

  std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(forked_pid));
  if (backtrace_map == nullptr) {
    LOG(ERROR) << "Could not create BacktraceMap";
    return;
  }

  for (pid_t tid : tids) {
    DumpThread(forked_pid, tid, addr2line_path.get(), "  ", backtrace_map.get());
  }
}
+
// Main wait loop of the watchdog: blocks on waitpid for the child and
//  - mirrors the child's exit code (or exits 1 if the child was signaled),
//  - records stops via |saw_wif_stopped_for_main| (the dumper thread's
//    go-ahead),
//  - ignores continues.
// Never returns.
[[noreturn]]
void WaitMainLoop(pid_t forked_pid, std::atomic<bool>* saw_wif_stopped_for_main) {
  for (;;) {
    // Consider switching to waitid to not get woken up for WIFSTOPPED.
    int status;
    pid_t res = TEMP_FAILURE_RETRY(waitpid(forked_pid, &status, 0));
    if (res == -1) {
      PLOG(FATAL) << "Failure during waitpid";
      __builtin_unreachable();
    }

    if (WIFEXITED(status)) {
      // Propagate the child's exit code.
      _exit(WEXITSTATUS(status));
      __builtin_unreachable();
    }
    if (WIFSIGNALED(status)) {
      _exit(1);
      __builtin_unreachable();
    }
    if (WIFSTOPPED(status)) {
      *saw_wif_stopped_for_main = true;
      continue;
    }
    if (WIFCONTINUED(status)) {
      continue;
    }

    LOG(FATAL) << "Unknown status " << std::hex << status;
  }
}
+
// Watchdog setup: blocks the timeout signal in this (and, via inheritance,
// the catcher) thread, spawns a catcher thread that dumps and kills the
// child when the timeout signal arrives, and enters the main waitpid loop.
// Never returns.
[[noreturn]]
void SetupAndWait(pid_t forked_pid) {
  timeout_signal::SignalSet signals;
  signals.Add(timeout_signal::GetTimeoutSignal());
  signals.Block();

  // Set by the wait loop when the child reports a stop; read by the dumper.
  std::atomic<bool> saw_wif_stopped_for_main(false);

  std::thread signal_catcher([&]() {
    // Block here as well so the signal is only ever delivered via sigwait.
    signals.Block();
    int sig = signals.Wait();
    CHECK_EQ(sig, timeout_signal::GetTimeoutSignal());

    DumpProcess(forked_pid, saw_wif_stopped_for_main);

    // Don't clean up. Just kill the child and exit.
    kill(forked_pid, SIGKILL);
    _exit(1);
  });

  WaitMainLoop(forked_pid, &saw_wif_stopped_for_main);
}
+
+}  // namespace
+}  // namespace art
+
+int main(int argc ATTRIBUTE_UNUSED, char** argv) {
+  pid_t orig_ppid = getpid();
+
+  pid_t pid = fork();
+  if (pid == 0) {
+    if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1) {
+      _exit(1);
+    }
+
+    if (getppid() != orig_ppid) {
+      _exit(2);
+    }
+
+    execvp(argv[1], &argv[1]);
+
+    _exit(3);
+    __builtin_unreachable();
+  }
+
+  art::SetupAndWait(pid);
+  __builtin_unreachable();
+}
diff --git a/tools/titrace/instruction_decoder.cc b/tools/titrace/instruction_decoder.cc
index d8fb713..6f497b3 100644
--- a/tools/titrace/instruction_decoder.cc
+++ b/tools/titrace/instruction_decoder.cc
@@ -32,7 +32,7 @@
     return Bytecode::ToString(op);
   }
 
-  virtual size_t LocationToOffset(size_t j_location) {
+  size_t LocationToOffset(size_t j_location) override {
     return j_location;
   }
 
@@ -456,9 +456,10 @@
         case kBreakpoint: return "breakpoint";
         case kImpdep1: return "impdep1";
         case kImpdep2: return "impdep2";
-        default: LOG(FATAL) << "Unknown opcode " << op;
+        default:
+          LOG(FATAL) << "Unknown opcode " << op;
+          __builtin_unreachable();
        }
-       return "";
      }
   };
 };
@@ -474,7 +475,7 @@
     return Bytecode::ToString(op);
   }
 
-  virtual size_t LocationToOffset(size_t j_location) {
+  size_t LocationToOffset(size_t j_location) override {
     // dex pc is uint16_t*, but offset needs to be in bytes.
     return j_location * (sizeof(uint16_t) / sizeof(uint8_t));
   }
@@ -484,7 +485,7 @@
    public:
     enum Opcode {
 #define MAKE_ENUM_DEFINITION(opcode, instruction_code, name, format, index, flags, extended_flags, verifier_flags) \
-      instruction_code = opcode,
+      instruction_code = opcode,  /* NOLINT */
 DEX_INSTRUCTION_LIST(MAKE_ENUM_DEFINITION)
 #undef MAKE_ENUM_DEFINITION
     };
@@ -500,7 +501,7 @@
 #undef MAKE_ENUM_DEFINITION
         default: LOG(FATAL) << "Unknown opcode " << op;
       }
-      return "";
+      __builtin_unreachable();
     }
   };
 };
diff --git a/tools/titrace/titrace.cc b/tools/titrace/titrace.cc
index 981ad56..1e49c0b 100644
--- a/tools/titrace/titrace.cc
+++ b/tools/titrace/titrace.cc
@@ -54,7 +54,7 @@
   }
 
   TiMemory(const TiMemory& other) = delete;
-  TiMemory(TiMemory&& other) {
+  TiMemory(TiMemory&& other) noexcept {
     env_ = other.env_;
     mem_ = other.mem_;
     size_ = other.size_;
@@ -66,7 +66,7 @@
     }
   }
 
-  TiMemory& operator=(TiMemory&& other) {
+  TiMemory& operator=(TiMemory&& other) noexcept {
     if (mem_ != other.mem_) {
       TiMemory::~TiMemory();
     }
@@ -237,7 +237,7 @@
                                     void* /* reserved */) {
   using namespace titrace;  // NOLINT [build/namespaces] [5]
 
-  android::base::InitLogging(/* argv */nullptr);
+  android::base::InitLogging(/* argv= */nullptr);
 
   jvmtiEnv* jvmti = nullptr;
   {
diff --git a/tools/tracefast-plugin/Android.bp b/tools/tracefast-plugin/Android.bp
index 1d7dd30..b7ae6c6 100644
--- a/tools/tracefast-plugin/Android.bp
+++ b/tools/tracefast-plugin/Android.bp
@@ -30,11 +30,6 @@
         "libbase",
     ],
     target: {
-        android: {
-            shared_libs: [
-                "libcutils",
-            ],
-        },
         darwin: {
             enabled: false,
         },
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index 4ea5b2d..98f7ea5 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -111,13 +111,6 @@
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
-  void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
-                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
-
   void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
                        const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
diff --git a/tools/unmount-buildbot-apexes.sh b/tools/unmount-buildbot-apexes.sh
new file mode 100755
index 0000000..8f0ad5f
--- /dev/null
+++ b/tools/unmount-buildbot-apexes.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unmount Android Runtime and Core Libraries APEX packages required in the chroot directory.
+# This script emulates some of the actions performed by `apexd`.
+
+# This script undoes the work done by tools/mount-buildbot-apexes.sh.
+# Make sure to keep these files in sync.
+
+green='\033[0;32m'
+nc='\033[0m'
+
+# Setup as root, as some actions performed here require it.
+adb root
+adb wait-for-device
+
+# Exit early if there is no chroot.
+[[ -n "$ART_TEST_CHROOT" ]] || exit
+
+# Check that ART_TEST_CHROOT is correctly defined.
+[[ "$ART_TEST_CHROOT" = /* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
+
+# Directory containing extracted APEX packages' payloads (ext4 images) under
+# the chroot directory.
+apex_image_dir="/tmp/apex"
+
+# deactivate_system_package APEX_NAME
+# -----------------------------------
+# Unmount APEX_NAME in `/apex` under the chroot directory and delete the
+# corresponding APEX package payload (ext4 image).
+deactivate_system_package() {
+  local apex_name=$1
+  local abs_image_filename="$ART_TEST_CHROOT$apex_image_dir/$apex_name.img"
+  local abs_mount_point="$ART_TEST_CHROOT/apex/$apex_name"
+
+  echo -e "${green}Deactivating package $apex_name${nc}"
+
+  # Unmount the package's payload (ext4 image).
+  if adb shell mount | grep -q "^/dev/block/loop[0-9]\+ on $abs_mount_point type ext4"; then
+    adb shell umount "$abs_mount_point"
+    adb shell rmdir "$abs_mount_point"
+    # Delete the ext4 image.
+    adb shell rm "$abs_image_filename"
+  fi
+}
+
+# Deactivate the Android Runtime APEX.
+deactivate_system_package com.android.runtime
+
+# Delete the image's directory.
+adb shell rmdir "$ART_TEST_CHROOT$apex_image_dir"
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 96d4a09..c375138 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -14,6 +14,7 @@
 
 cc_binary {
     name: "veridex",
+    defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
         "flow_analysis.cc",
@@ -29,7 +30,6 @@
         "libartbase",
         "libbase",
         "liblog",
-        "libutils",
         "libz",
         "libziparchive",
     ],
diff --git a/tools/veridex/Android.mk b/tools/veridex/Android.mk
index 2faa577..c510a51 100644
--- a/tools/veridex/Android.mk
+++ b/tools/veridex/Android.mk
@@ -30,14 +30,10 @@
 $(oahl_stub_dex): $(call get-prebuilt-sdk-dir,current)/org.apache.http.legacy.jar | $(ZIP2ZIP) $(DX)
 	$(transform-classes.jar-to-dex)
 
-app_compat_lists := \
-  $(INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST) \
-  $(INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST) \
-  $(INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST)
-
 # Phony rule to create all dependencies of the appcompat.sh script.
 .PHONY: appcompat
-appcompat: $(system_stub_dex) $(oahl_stub_dex) $(HOST_OUT_EXECUTABLES)/veridex $(app_compat_lists)
+appcompat: $(system_stub_dex) $(oahl_stub_dex) $(HOST_OUT_EXECUTABLES)/veridex \
+    $(INTERNAL_PLATFORM_HIDDENAPI_FLAGS)
 
 VERIDEX_FILES_PATH := \
     $(call intermediates-dir-for,PACKAGING,veridex,HOST)/veridex.zip
@@ -45,23 +41,31 @@
 VERIDEX_FILES := $(LOCAL_PATH)/appcompat.sh
 
 $(VERIDEX_FILES_PATH): PRIVATE_VERIDEX_FILES := $(VERIDEX_FILES)
-$(VERIDEX_FILES_PATH): PRIVATE_APP_COMPAT_LISTS := $(app_compat_lists)
+$(VERIDEX_FILES_PATH): PRIVATE_SYSTEM_STUBS_DEX_DIR := $(dir $(system_stub_dex))
 $(VERIDEX_FILES_PATH): PRIVATE_SYSTEM_STUBS_ZIP := $(dir $(VERIDEX_FILES_PATH))/system-stubs.zip
+$(VERIDEX_FILES_PATH): PRIVATE_OAHL_STUBS_DEX_DIR := $(dir $(oahl_stub_dex))
 $(VERIDEX_FILES_PATH): PRIVATE_OAHL_STUBS_ZIP := $(dir $(VERIDEX_FILES_PATH))/org.apache.http.legacy-stubs.zip
-$(VERIDEX_FILES_PATH) : $(SOONG_ZIP) $(VERIDEX_FILES) $(app_compat_lists) $(HOST_OUT_EXECUTABLES)/veridex $(system_stub_dex) $(oahl_stub_dex)
-	$(hide) rm -f $(PRIVATE_SYSTEM_STUBS_ZIP) $(PRIVATE_OAHL_STUBS_ZIP)
-	$(hide) zip -j $(PRIVATE_SYSTEM_STUBS_ZIP) $(dir $(system_stub_dex))/classes*.dex
-	$(hide) zip -j $(PRIVATE_OAHL_STUBS_ZIP) $(dir $(oahl_stub_dex))/classes*.dex
-	$(hide) $(SOONG_ZIP) -o $@ -C art/tools/veridex -f $(PRIVATE_VERIDEX_FILES) \
-                             -C $(dir $(lastword $(PRIVATE_APP_COMPAT_LISTS))) $(addprefix -f , $(PRIVATE_APP_COMPAT_LISTS)) \
-                             -C $(HOST_OUT_EXECUTABLES) -f $(HOST_OUT_EXECUTABLES)/veridex \
-                             -C $(dir $(PRIVATE_SYSTEM_STUBS_ZIP)) -f $(PRIVATE_SYSTEM_STUBS_ZIP) \
-                             -C $(dir $(PRIVATE_OAHL_STUBS_ZIP)) -f $(PRIVATE_OAHL_STUBS_ZIP)
-	$(hide) rm -f $(PRIVATE_SYSTEM_STUBS_ZIP)
-	$(hide) rm -f $(PRIVATE_OAHL_STUBS_ZIP)
+$(VERIDEX_FILES_PATH) : $(SOONG_ZIP) $(VERIDEX_FILES) $(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
+    $(HOST_OUT_EXECUTABLES)/veridex $(system_stub_dex) $(oahl_stub_dex)
+	rm -rf $(dir $@)/*
+	ls -1 $(PRIVATE_SYSTEM_STUBS_DEX_DIR)/classes*.dex | sort >$(PRIVATE_SYSTEM_STUBS_ZIP).list
+	$(SOONG_ZIP) -o $(PRIVATE_SYSTEM_STUBS_ZIP) -C $(PRIVATE_SYSTEM_STUBS_DEX_DIR) -l $(PRIVATE_SYSTEM_STUBS_ZIP).list
+	rm $(PRIVATE_SYSTEM_STUBS_ZIP).list
+	ls -1 $(PRIVATE_OAHL_STUBS_DEX_DIR)/classes*.dex | sort >$(PRIVATE_OAHL_STUBS_ZIP).list
+	$(SOONG_ZIP) -o $(PRIVATE_OAHL_STUBS_ZIP) -C $(PRIVATE_OAHL_STUBS_DEX_DIR) -l $(PRIVATE_OAHL_STUBS_ZIP).list
+	rm $(PRIVATE_OAHL_STUBS_ZIP).list
+	$(SOONG_ZIP) -o $@ -C art/tools/veridex -f $(PRIVATE_VERIDEX_FILES) \
+	                    -C $(dir $(INTERNAL_PLATFORM_HIDDENAPI_FLAGS)) \
+	                        -f $(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
+	                   -C $(HOST_OUT_EXECUTABLES) -f $(HOST_OUT_EXECUTABLES)/veridex \
+	                   -C $(dir $(PRIVATE_SYSTEM_STUBS_ZIP)) -f $(PRIVATE_SYSTEM_STUBS_ZIP) \
+	                   -C $(dir $(PRIVATE_OAHL_STUBS_ZIP)) -f $(PRIVATE_OAHL_STUBS_ZIP)
+	rm -f $(PRIVATE_SYSTEM_STUBS_ZIP)
+	rm -f $(PRIVATE_OAHL_STUBS_ZIP)
 
 # Make the zip file available for prebuilts.
 $(call dist-for-goals,sdk,$(VERIDEX_FILES_PATH))
 
 VERIDEX_FILES :=
-app_compat_lists :=
+system_stub_dex :=
+oahl_stub_dex :=
diff --git a/tools/veridex/appcompat.sh b/tools/veridex/appcompat.sh
index e7b735d..46d62db9 100755
--- a/tools/veridex/appcompat.sh
+++ b/tools/veridex/appcompat.sh
@@ -22,16 +22,12 @@
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
 if [[ -e ${SCRIPT_DIR}/veridex && \
-      -e ${SCRIPT_DIR}/hiddenapi-blacklist.txt && \
-      -e ${SCRIPT_DIR}/hiddenapi-light-greylist.txt && \
-      -e ${SCRIPT_DIR}/hiddenapi-dark-greylist.txt && \
+      -e ${SCRIPT_DIR}/hiddenapi-flags.csv && \
       -e ${SCRIPT_DIR}/org.apache.http.legacy-stubs.zip && \
       -e ${SCRIPT_DIR}/system-stubs.zip ]]; then
   exec ${SCRIPT_DIR}/veridex \
     --core-stubs=${SCRIPT_DIR}/system-stubs.zip:${SCRIPT_DIR}/org.apache.http.legacy-stubs.zip \
-    --blacklist=${SCRIPT_DIR}/hiddenapi-blacklist.txt \
-    --light-greylist=${SCRIPT_DIR}/hiddenapi-light-greylist.txt \
-    --dark-greylist=${SCRIPT_DIR}/hiddenapi-dark-greylist.txt \
+    --api-flags=${SCRIPT_DIR}/hiddenapi-flags.csv \
     $@
 fi
 
@@ -43,8 +39,8 @@
 fi
 
 # Logic for setting out_dir from build/make/core/envsetup.mk:
-if [[ -z $OUT_DIR ]]; then
-  if [[ -z $OUT_DIR_COMMON_BASE ]]; then
+if [[ -z "${OUT_DIR}" ]]; then
+  if [[ -z "${OUT_DIR_COMMON_BASE}" ]]; then
     OUT=out
   else
     OUT=${OUT_DIR_COMMON_BASE}/${PWD##*/}
@@ -53,16 +49,16 @@
   OUT=${OUT_DIR}
 fi
 
-PACKAGING=${OUT}/target/common/obj/PACKAGING
+if [[ -z "${PACKAGING}" ]]; then
+  PACKAGING=${OUT}/target/common/obj/PACKAGING
+fi
 
-if [ -z "$ANDROID_HOST_OUT" ] ; then
+if [[ -z "${ANDROID_HOST_OUT}" ]]; then
   ANDROID_HOST_OUT=${OUT}/host/linux-x86
 fi
 
 
 ${ANDROID_HOST_OUT}/bin/veridex \
     --core-stubs=${PACKAGING}/core_dex_intermediates/classes.dex:${PACKAGING}/oahl_dex_intermediates/classes.dex \
-    --blacklist=${PACKAGING}/hiddenapi-blacklist.txt \
-    --light-greylist=${PACKAGING}/hiddenapi-light-greylist.txt \
-    --dark-greylist=${PACKAGING}/hiddenapi-dark-greylist.txt \
+    --api-flags=${PACKAGING}/hiddenapi-flags.csv \
     $@
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index f5eb4ea..65f2363 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -131,15 +131,15 @@
 
 RegisterValue VeriFlowAnalysis::GetReturnType(uint32_t method_index) {
   const DexFile& dex_file = resolver_->GetDexFile();
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(method_index);
-  const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id);
+  const dex::MethodId& method_id = dex_file.GetMethodId(method_index);
+  const dex::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id);
   VeriClass* cls = resolver_->GetVeriClass(proto_id.return_type_idx_);
   return RegisterValue(RegisterSource::kMethod, DexFileReference(&dex_file, method_index), cls);
 }
 
 RegisterValue VeriFlowAnalysis::GetFieldType(uint32_t field_index) {
   const DexFile& dex_file = resolver_->GetDexFile();
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
   VeriClass* cls = resolver_->GetVeriClass(field_id.type_idx_);
   return RegisterValue(RegisterSource::kField, DexFileReference(&dex_file, field_index), cls);
 }
@@ -162,7 +162,7 @@
     case Instruction::IF_##cond##Z: { \
       RegisterValue val = GetRegister(instruction.VRegA()); \
       if (val.IsConstant()) { \
-        if (val.GetConstant() op 0) { \
+        if (val.GetConstant() op 0) {  /* NOLINT */ \
           return Instruction::kBranch; \
         } else { \
           return Instruction::kContinue; \
@@ -318,7 +318,7 @@
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_SUPER:
     case Instruction::INVOKE_VIRTUAL: {
-      last_result_ = AnalyzeInvoke(instruction, /* is_range */ false);
+      last_result_ = AnalyzeInvoke(instruction, /* is_range= */ false);
       break;
     }
 
@@ -327,7 +327,7 @@
     case Instruction::INVOKE_STATIC_RANGE:
     case Instruction::INVOKE_SUPER_RANGE:
     case Instruction::INVOKE_VIRTUAL_RANGE: {
-      last_result_ = AnalyzeInvoke(instruction, /* is_range */ true);
+      last_result_ = AnalyzeInvoke(instruction, /* is_range= */ true);
       break;
     }
 
@@ -495,7 +495,7 @@
     case Instruction::DIV_INT_LIT8:
     case Instruction::REM_INT_LIT8:
     case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8: {
+    case Instruction::SHR_INT_LIT8:
     case Instruction::USHR_INT_LIT8: {
       UpdateRegister(instruction.VRegA(), VeriClass::integer_);
       break;
@@ -537,7 +537,7 @@
     case Instruction::CMPG_FLOAT:
     case Instruction::CMPG_DOUBLE:
     case Instruction::CMPL_FLOAT:
-    case Instruction::CMPL_DOUBLE:
+    case Instruction::CMPL_DOUBLE: {
       UpdateRegister(instruction.VRegA(), VeriClass::integer_);
       break;
     }
@@ -702,21 +702,21 @@
     // second parameter for the field name.
     RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
     RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
-    uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ false));
+    uses_.push_back(ReflectAccessInfo(cls, name, /* is_method= */ false));
     return GetReturnType(id);
   } else if (IsGetMethod(method)) {
     // Class.getMethod or Class.getDeclaredMethod. Fetch the first parameter for the class, and the
     // second parameter for the field name.
     RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
     RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
-    uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ true));
+    uses_.push_back(ReflectAccessInfo(cls, name, /* is_method= */ true));
     return GetReturnType(id);
   } else if (method == VeriClass::getClass_) {
     // Get the type of the first parameter.
     RegisterValue obj = GetRegister(GetParameterAt(instruction, is_range, args, 0));
     const VeriClass* cls = obj.GetType();
     if (cls != nullptr && cls->GetClassDef() != nullptr) {
-      const DexFile::ClassDef* def = cls->GetClassDef();
+      const dex::ClassDef* def = cls->GetClassDef();
       return RegisterValue(
           RegisterSource::kClass,
           DexFileReference(&resolver_->GetDexFileOf(*cls), def->class_idx_.index_),
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 865b9df..2151a41 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -174,7 +174,8 @@
   RegisterValue name;
   bool is_method;
 
-  ReflectAccessInfo(RegisterValue c, RegisterValue n, bool m) : cls(c), name(n), is_method(m) {}
+  ReflectAccessInfo(RegisterValue c, RegisterValue n, bool is_method)
+      : cls(c), name(n), is_method(is_method) {}
 
   bool IsConcrete() const {
     // We capture RegisterSource::kString for the class, for example in Class.forName.
diff --git a/tools/veridex/hidden_api.cc b/tools/veridex/hidden_api.cc
index 17fa1b8..efb01f7 100644
--- a/tools/veridex/hidden_api.cc
+++ b/tools/veridex/hidden_api.cc
@@ -19,13 +19,65 @@
 #include <fstream>
 #include <sstream>
 
+#include "android-base/strings.h"
 #include "dex/dex_file-inl.h"
 
 namespace art {
 
+HiddenApi::HiddenApi(const char* filename, bool sdk_uses_only) {
+  CHECK(filename != nullptr);
+
+  std::ifstream in(filename);
+  for (std::string str; std::getline(in, str);) {
+    std::vector<std::string> values = android::base::Split(str, ",");
+    const std::string& signature = values[0];
+
+    hiddenapi::ApiList membership;
+    bool success = hiddenapi::ApiList::FromNames(values.begin() + 1, values.end(), &membership);
+    CHECK(success) << "Unknown ApiList flag: " << str;
+    CHECK(membership.IsValid()) << "Invalid ApiList: " << membership;
+
+    if (sdk_uses_only != membership.Contains(hiddenapi::ApiList::Whitelist())) {
+      // Either we want only SDK uses and this is not a whitelist entry,
+      // or we want only non-SDK uses and this is a whitelist entry.
+      continue;
+    }
+
+    AddSignatureToApiList(signature, membership);
+    size_t pos = signature.find("->");
+    if (pos != std::string::npos) {
+      // Add the class name.
+      AddSignatureToApiList(signature.substr(0, pos), membership);
+      pos = signature.find('(');
+      if (pos != std::string::npos) {
+        // Add the class->method name (so stripping the signature).
+        AddSignatureToApiList(signature.substr(0, pos), membership);
+      }
+      pos = signature.find(':');
+      if (pos != std::string::npos) {
+        // Add the class->field name (so stripping the type).
+        AddSignatureToApiList(signature.substr(0, pos), membership);
+      }
+    }
+  }
+}
+
+void HiddenApi::AddSignatureToApiList(const std::string& signature, hiddenapi::ApiList membership) {
+  auto it = api_list_.find(signature);
+  if (it == api_list_.end()) {
+    // Does not exist yet. Add it to list.
+    api_list_.emplace(signature, membership);
+  } else if (membership.GetMaxAllowedSdkVersion() < it->second.GetMaxAllowedSdkVersion()) {
+    // Already exist but `membership` is more restrictive.
+    it->second = membership;
+  } else {
+    // Already exists and `membership` is equally or less restrictive.
+  }
+}
+
 std::string HiddenApi::GetApiMethodName(const DexFile& dex_file, uint32_t method_index) {
   std::stringstream ss;
-  const DexFile::MethodId& method_id = dex_file.GetMethodId(method_index);
+  const dex::MethodId& method_id = dex_file.GetMethodId(method_index);
   ss << dex_file.StringByTypeIdx(method_id.class_idx_)
      << "->"
      << dex_file.GetMethodName(method_id)
@@ -35,7 +87,7 @@
 
 std::string HiddenApi::GetApiFieldName(const DexFile& dex_file, uint32_t field_index) {
   std::stringstream ss;
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
   ss << dex_file.StringByTypeIdx(field_id.class_idx_)
      << "->"
      << dex_file.GetFieldName(field_id)
@@ -44,30 +96,4 @@
   return ss.str();
 }
 
-void HiddenApi::FillList(const char* filename, std::set<std::string>& entries) {
-  if (filename == nullptr) {
-    return;
-  }
-  std::ifstream in(filename);
-  std::string str;
-  while (std::getline(in, str)) {
-    entries.insert(str);
-    size_t pos = str.find("->");
-    if (pos != std::string::npos) {
-      // Add the class name.
-      entries.insert(str.substr(0, pos));
-      pos = str.find('(');
-      if (pos != std::string::npos) {
-        // Add the class->method name (so stripping the signature).
-        entries.insert(str.substr(0, pos));
-      }
-      pos = str.find(':');
-      if (pos != std::string::npos) {
-        // Add the class->field name (so stripping the type).
-        entries.insert(str.substr(0, pos));
-      }
-    }
-  }
-}
-
 }  // namespace art
diff --git a/tools/veridex/hidden_api.h b/tools/veridex/hidden_api.h
index b1c8559..e1b67a2 100644
--- a/tools/veridex/hidden_api.h
+++ b/tools/veridex/hidden_api.h
@@ -17,11 +17,11 @@
 #ifndef ART_TOOLS_VERIDEX_HIDDEN_API_H_
 #define ART_TOOLS_VERIDEX_HIDDEN_API_H_
 
-#include "dex/hidden_api_access_flags.h"
+#include "base/hiddenapi_flags.h"
 #include "dex/method_reference.h"
 
+#include <map>
 #include <ostream>
-#include <set>
 #include <string>
 
 namespace art {
@@ -33,26 +33,15 @@
  */
 class HiddenApi {
  public:
-  HiddenApi(const char* blacklist, const char* dark_greylist, const char* light_greylist) {
-    FillList(light_greylist, light_greylist_);
-    FillList(dark_greylist, dark_greylist_);
-    FillList(blacklist, blacklist_);
+  HiddenApi(const char* flags_file, bool sdk_uses_only);
+
+  hiddenapi::ApiList GetApiList(const std::string& name) const {
+    auto it = api_list_.find(name);
+    return (it == api_list_.end()) ? hiddenapi::ApiList() : it->second;
   }
 
-  HiddenApiAccessFlags::ApiList GetApiList(const std::string& name) const {
-    if (IsInList(name, blacklist_)) {
-      return HiddenApiAccessFlags::kBlacklist;
-    } else if (IsInList(name, dark_greylist_)) {
-      return HiddenApiAccessFlags::kDarkGreylist;
-    } else if (IsInList(name, light_greylist_)) {
-      return HiddenApiAccessFlags::kLightGreylist;
-    } else {
-      return HiddenApiAccessFlags::kWhitelist;
-    }
-  }
-
-  bool IsInRestrictionList(const std::string& name) const {
-    return GetApiList(name) != HiddenApiAccessFlags::kWhitelist;
+  bool IsInAnyList(const std::string& name) const {
+    return !GetApiList(name).IsEmpty();
   }
 
   static std::string GetApiMethodName(const DexFile& dex_file, uint32_t method_index);
@@ -70,22 +59,16 @@
   }
 
  private:
-  static bool IsInList(const std::string& name, const std::set<std::string>& list) {
-    return list.find(name) != list.end();
-  }
+  void AddSignatureToApiList(const std::string& signature, hiddenapi::ApiList membership);
 
-  static void FillList(const char* filename, std::set<std::string>& entries);
-
-  std::set<std::string> blacklist_;
-  std::set<std::string> light_greylist_;
-  std::set<std::string> dark_greylist_;
+  std::map<std::string, hiddenapi::ApiList> api_list_;
 };
 
 struct HiddenApiStats {
   uint32_t count = 0;
   uint32_t reflection_count = 0;
   uint32_t linking_count = 0;
-  uint32_t api_counts[4] = { 0, 0, 0, 0 };
+  uint32_t api_counts[hiddenapi::ApiList::kValueCount] = {};  // initialize all to zero
 };
 
 }  // namespace art
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index d81f133..fe6d88a 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -35,7 +35,7 @@
   // Note: we always query whether a method is in a list, as the app
   // might define blacklisted APIs (which won't be used at runtime).
   std::string name = HiddenApi::GetApiMethodName(resolver->GetDexFile(), method_id);
-  if (hidden_api_.IsInRestrictionList(name)) {
+  if (hidden_api_.IsInAnyList(name)) {
     method_locations_[name].push_back(ref);
   }
 }
@@ -46,7 +46,7 @@
   // Note: we always query whether a field is in a list, as the app
   // might define blacklisted APIs (which won't be used at runtime).
   std::string name = HiddenApi::GetApiFieldName(resolver->GetDexFile(), field_id);
-  if (hidden_api_.IsInRestrictionList(name)) {
+  if (hidden_api_.IsInAnyList(name)) {
     field_locations_[name].push_back(ref);
   }
 }
@@ -57,7 +57,7 @@
   // types can lead to being used through reflection.
   for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
     std::string name(dex_file.StringByTypeIdx(dex::TypeIndex(i)));
-    if (hidden_api_.IsInRestrictionList(name)) {
+    if (hidden_api_.IsInAnyList(name)) {
       classes_.insert(name);
     }
   }
@@ -81,9 +81,9 @@
               // private methods and fields in them.
               // We don't add class names to the `strings_` set as we know method/field names
               // don't have '.' or '/'. All hidden API class names have a '/'.
-              if (hidden_api_.IsInRestrictionList(str)) {
+              if (hidden_api_.IsInAnyList(str)) {
                 classes_.insert(str);
-              } else if (hidden_api_.IsInRestrictionList(name)) {
+              } else if (hidden_api_.IsInAnyList(name)) {
                 // Could be something passed to JNI.
                 classes_.insert(name);
               } else {
@@ -174,32 +174,29 @@
 void HiddenApiFinder::Dump(std::ostream& os,
                            HiddenApiStats* stats,
                            bool dump_reflection) {
-  static const char* kPrefix = "       ";
   stats->linking_count = method_locations_.size() + field_locations_.size();
 
   // Dump methods from hidden APIs linked against.
   for (const std::pair<const std::string,
                        std::vector<MethodReference>>& pair : method_locations_) {
-    HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(pair.first);
-    stats->api_counts[api_list]++;
+    hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
+    CHECK(api_list.IsValid());
+    stats->api_counts[api_list.GetIntValue()]++;
     os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
     os << std::endl;
-    for (const MethodReference& ref : pair.second) {
-      os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
-    }
+    HiddenApiFinder::DumpReferences(os, pair.second);
     os << std::endl;
   }
 
   // Dump fields from hidden APIs linked against.
   for (const std::pair<const std::string,
                        std::vector<MethodReference>>& pair : field_locations_) {
-    HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(pair.first);
-    stats->api_counts[api_list]++;
+    hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
+    CHECK(api_list.IsValid());
+    stats->api_counts[api_list.GetIntValue()]++;
     os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
     os << std::endl;
-    for (const MethodReference& ref : pair.second) {
-      os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
-    }
+    HiddenApiFinder::DumpReferences(os, pair.second);
     os << std::endl;
   }
 
@@ -208,16 +205,14 @@
     for (const std::string& cls : classes_) {
       for (const std::string& name : strings_) {
         std::string full_name = cls + "->" + name;
-        HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
-        stats->api_counts[api_list]++;
-        if (api_list != HiddenApiAccessFlags::kWhitelist) {
+        hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
+        if (api_list.IsValid()) {
+          stats->api_counts[api_list.GetIntValue()]++;
           stats->reflection_count++;
           os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name
              << " potential use(s):";
           os << std::endl;
-          for (const MethodReference& ref : reflection_locations_[name]) {
-            os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
-          }
+          HiddenApiFinder::DumpReferences(os, reflection_locations_[name]);
           os << std::endl;
         }
       }
@@ -225,4 +220,27 @@
   }
 }
 
+void HiddenApiFinder::DumpReferences(std::ostream& os,
+                                     const std::vector<MethodReference>& references) {
+  static const char* kPrefix = "       ";
+
+  // Count number of occurrences of each reference, to make the output clearer.
+  std::map<std::string, size_t> counts;
+  for (const MethodReference& ref : references) {
+    std::string ref_string = HiddenApi::GetApiMethodName(ref);
+    if (!counts.count(ref_string)) {
+      counts[ref_string] = 0;
+    }
+    counts[ref_string]++;
+  }
+
+  for (const std::pair<const std::string, size_t>& pair : counts) {
+    os << kPrefix << pair.first;
+    if (pair.second > 1) {
+       os << " (" << pair.second << " occurrences)";
+    }
+    os << std::endl;
+  }
+}
+
 }  // namespace art
diff --git a/tools/veridex/hidden_api_finder.h b/tools/veridex/hidden_api_finder.h
index f7d3dc8..9e10c1a 100644
--- a/tools/veridex/hidden_api_finder.h
+++ b/tools/veridex/hidden_api_finder.h
@@ -47,6 +47,7 @@
   void CollectAccesses(VeridexResolver* resolver);
   void CheckMethod(uint32_t method_idx, VeridexResolver* resolver, MethodReference ref);
   void CheckField(uint32_t field_idx, VeridexResolver* resolver, MethodReference ref);
+  void DumpReferences(std::ostream& os, const std::vector<MethodReference>& references);
 
   const HiddenApi& hidden_api_;
   std::set<std::string> classes_;
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 445221e..be99ed2 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -85,24 +85,23 @@
 void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
   static const char* kPrefix = "       ";
   std::map<std::string, std::vector<MethodReference>> named_uses;
-  for (auto it : concrete_uses_) {
+  for (auto& it : concrete_uses_) {
     MethodReference ref = it.first;
     for (const ReflectAccessInfo& info : it.second) {
       std::string cls(info.cls.ToString());
       std::string name(info.name.ToString());
       std::string full_name = cls + "->" + name;
-      HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
-      if (api_list != HiddenApiAccessFlags::kWhitelist) {
+      if (hidden_api_.IsInAnyList(full_name)) {
         named_uses[full_name].push_back(ref);
       }
     }
   }
 
-  for (auto it : named_uses) {
+  for (auto& it : named_uses) {
     ++stats->reflection_count;
     const std::string& full_name = it.first;
-    HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
-    stats->api_counts[api_list]++;
+    hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
+    stats->api_counts[api_list.GetIntValue()]++;
     os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name << " use(s):";
     os << std::endl;
     for (const MethodReference& ref : it.second) {
diff --git a/tools/veridex/resolver.cc b/tools/veridex/resolver.cc
index 56729ff..df097b6 100644
--- a/tools/veridex/resolver.cc
+++ b/tools/veridex/resolver.cc
@@ -19,6 +19,7 @@
 #include "dex/class_accessor-inl.h"
 #include "dex/dex_file-inl.h"
 #include "dex/primitive.h"
+#include "dex/signature-inl.h"
 #include "hidden_api.h"
 #include "veridex.h"
 
@@ -46,7 +47,7 @@
 }
 
 static bool HasSameNameAndSignature(const DexFile& dex_file,
-                                    const DexFile::MethodId& method_id,
+                                    const dex::MethodId& method_id,
                                     const char* method_name,
                                     const char* type) {
   return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 &&
@@ -54,7 +55,7 @@
 }
 
 static bool HasSameNameAndSignature(const DexFile& dex_file,
-                                    const DexFile::MethodId& method_id,
+                                    const dex::MethodId& method_id,
                                     const char* method_name,
                                     const Signature& signature) {
   return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 &&
@@ -62,7 +63,7 @@
 }
 
 static bool HasSameNameAndType(const DexFile& dex_file,
-                               const DexFile::FieldId& field_id,
+                               const dex::FieldId& field_id,
                                const char* field_name,
                                const char* field_type) {
   return strcmp(field_name, dex_file.GetFieldName(field_id)) == 0 &&
@@ -139,7 +140,7 @@
   const DexFile& other_dex_file = resolver->dex_file_;
   ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
   for (const ClassAccessor::Method& method : other_dex_accessor.GetMethods()) {
-    const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(method.GetIndex());
+    const dex::MethodId& other_method_id = other_dex_file.GetMethodId(method.GetIndex());
     if (HasSameNameAndSignature(other_dex_file,
                                 other_method_id,
                                 method_name,
@@ -160,7 +161,7 @@
   }
 
   // Look at methods in `kls`'s interface hierarchy.
-  const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+  const dex::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
   if (interfaces != nullptr) {
     for (size_t i = 0; i < interfaces->Size(); i++) {
       dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -194,7 +195,7 @@
   const DexFile& other_dex_file = resolver->dex_file_;
   ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
   for (const ClassAccessor::Field& field : other_dex_accessor.GetFields()) {
-    const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(field.GetIndex());
+    const dex::FieldId& other_field_id = other_dex_file.GetFieldId(field.GetIndex());
     if (HasSameNameAndType(other_dex_file,
                            other_field_id,
                            field_name,
@@ -204,7 +205,7 @@
   }
 
   // Look at fields in `kls`'s interface hierarchy.
-  const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+  const dex::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
   if (interfaces != nullptr) {
     for (size_t i = 0; i < interfaces->Size(); i++) {
       dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -258,7 +259,7 @@
   VeriMethod method_info = method_infos_[method_index];
   if (method_info == nullptr) {
     // Method is defined in another dex file.
-    const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_index);
+    const dex::MethodId& method_id = dex_file_.GetMethodId(method_index);
     VeriClass* kls = GetVeriClass(method_id.class_idx_);
     if (kls == nullptr) {
       return nullptr;
@@ -276,7 +277,7 @@
   VeriField field_info = field_infos_[field_index];
   if (field_info == nullptr) {
     // Field is defined in another dex file.
-    const DexFile::FieldId& field_id = dex_file_.GetFieldId(field_index);
+    const dex::FieldId& field_id = dex_file_.GetFieldId(field_index);
     VeriClass* kls = GetVeriClass(field_id.class_idx_);
     if (kls == nullptr) {
       return nullptr;
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 1d3a4fb..3b6c7f9 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -65,14 +65,20 @@
 VeriMethod VeriClass::loadClass_ = nullptr;
 VeriField VeriClass::sdkInt_ = nullptr;
 
+static const char* kDexFileOption = "--dex-file=";
+static const char* kStubsOption = "--core-stubs=";
+static const char* kFlagsOption = "--api-flags=";
+static const char* kImprecise = "--imprecise";
+static const char* kTargetSdkVersion = "--target-sdk-version=";
+static const char* kOnlyReportSdkUses = "--only-report-sdk-uses";
+
 struct VeridexOptions {
   const char* dex_file = nullptr;
   const char* core_stubs = nullptr;
-  const char* blacklist = nullptr;
-  const char* light_greylist = nullptr;
-  const char* dark_greylist = nullptr;
+  const char* flags_file = nullptr;
   bool precise = true;
   int target_sdk_version = 28; /* P */
+  bool only_report_sdk_uses = false;
 };
 
 static const char* Substr(const char* str, int index) {
@@ -88,29 +94,19 @@
   argv++;
   argc--;
 
-  static const char* kDexFileOption = "--dex-file=";
-  static const char* kStubsOption = "--core-stubs=";
-  static const char* kBlacklistOption = "--blacklist=";
-  static const char* kDarkGreylistOption = "--dark-greylist=";
-  static const char* kLightGreylistOption = "--light-greylist=";
-  static const char* kImprecise = "--imprecise";
-  static const char* kTargetSdkVersion = "--target-sdk-version=";
-
   for (int i = 0; i < argc; ++i) {
     if (StartsWith(argv[i], kDexFileOption)) {
       options->dex_file = Substr(argv[i], strlen(kDexFileOption));
     } else if (StartsWith(argv[i], kStubsOption)) {
       options->core_stubs = Substr(argv[i], strlen(kStubsOption));
-    } else if (StartsWith(argv[i], kBlacklistOption)) {
-      options->blacklist = Substr(argv[i], strlen(kBlacklistOption));
-    } else if (StartsWith(argv[i], kDarkGreylistOption)) {
-      options->dark_greylist = Substr(argv[i], strlen(kDarkGreylistOption));
-    } else if (StartsWith(argv[i], kLightGreylistOption)) {
-      options->light_greylist = Substr(argv[i], strlen(kLightGreylistOption));
+    } else if (StartsWith(argv[i], kFlagsOption)) {
+      options->flags_file = Substr(argv[i], strlen(kFlagsOption));
     } else if (strcmp(argv[i], kImprecise) == 0) {
       options->precise = false;
     } else if (StartsWith(argv[i], kTargetSdkVersion)) {
       options->target_sdk_version = atoi(Substr(argv[i], strlen(kTargetSdkVersion)));
+    } else if (strcmp(argv[i], kOnlyReportSdkUses) == 0) {
+      options->only_report_sdk_uses = true;
     }
   }
 }
@@ -130,6 +126,13 @@
   static int Run(int argc, char** argv) {
     VeridexOptions options;
     ParseArgs(&options, argc, argv);
+    android::base::InitLogging(argv);
+
+    if (!options.dex_file) {
+      LOG(ERROR) << "Required argument '" << kDexFileOption << "' not provided.";
+      return 1;
+    }
+
     gTargetSdkVersion = options.target_sdk_version;
 
     std::vector<std::string> boot_content;
@@ -216,7 +219,7 @@
     Resolve(app_dex_files, resolver_map, type_map, &app_resolvers);
 
     // Find and log uses of hidden APIs.
-    HiddenApi hidden_api(options.blacklist, options.dark_greylist, options.light_greylist);
+    HiddenApi hidden_api(options.flags_file, options.only_report_sdk_uses);
     HiddenApiStats stats;
 
     HiddenApiFinder api_finder(hidden_api);
@@ -229,7 +232,7 @@
       precise_api_finder.Dump(std::cout, &stats);
     }
 
-    DumpSummaryStats(std::cout, stats);
+    DumpSummaryStats(std::cout, stats, options);
 
     if (options.precise) {
       std::cout << "To run an analysis that can give more reflection accesses, " << std::endl
@@ -240,17 +243,24 @@
   }
 
  private:
-  static void DumpSummaryStats(std::ostream& os, const HiddenApiStats& stats) {
+  static void DumpSummaryStats(std::ostream& os,
+                               const HiddenApiStats& stats,
+                               const VeridexOptions& options) {
     static const char* kPrefix = "       ";
-    os << stats.count << " hidden API(s) used: "
-       << stats.linking_count << " linked against, "
-       << stats.reflection_count << " through reflection" << std::endl;
-    os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kBlacklist]
-       << " in blacklist" << std::endl;
-    os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kDarkGreylist]
-       << " in dark greylist" << std::endl;
-    os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kLightGreylist]
-       << " in light greylist" << std::endl;
+    if (options.only_report_sdk_uses) {
+      os << stats.api_counts[hiddenapi::ApiList::Whitelist().GetIntValue()]
+         << " SDK API uses." << std::endl;
+    } else {
+      os << stats.count << " hidden API(s) used: "
+         << stats.linking_count << " linked against, "
+         << stats.reflection_count << " through reflection" << std::endl;
+      for (size_t i = 0; i < hiddenapi::ApiList::kValueCount; ++i) {
+        hiddenapi::ApiList api_list = hiddenapi::ApiList(i);
+        if (api_list != hiddenapi::ApiList::Whitelist()) {
+          os << kPrefix << stats.api_counts[i] << " in " << api_list << std::endl;
+        }
+      }
+    }
   }
 
   static bool Load(const std::string& filename,
@@ -312,4 +322,3 @@
 int main(int argc, char** argv) {
   return art::Veridex::Run(argc, argv);
 }
-
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index 31ddbf4..f02de96 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -19,11 +19,14 @@
 
 #include <map>
 
-#include "dex/dex_file.h"
 #include "dex/primitive.h"
 
 namespace art {
 
+namespace dex {
+struct ClassDef;
+}  // namespace dex
+
 static int gTargetSdkVersion = 1000;  // Will be initialized after parsing options.
 
 /**
@@ -44,9 +47,8 @@
  */
 class VeriClass {
  public:
-  VeriClass(const VeriClass& other) = default;
   VeriClass() = default;
-  VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
+  VeriClass(Primitive::Type k, uint8_t dims, const dex::ClassDef* cl)
       : kind_(k), dimensions_(dims), class_def_(cl) {}
 
   bool IsUninitialized() const {
@@ -63,7 +65,7 @@
 
   Primitive::Type GetKind() const { return kind_; }
   uint8_t GetDimensions() const { return dimensions_; }
-  const DexFile::ClassDef* GetClassDef() const { return class_def_; }
+  const dex::ClassDef* GetClassDef() const { return class_def_; }
 
   static VeriClass* object_;
   static VeriClass* class_;
@@ -93,7 +95,7 @@
  private:
   Primitive::Type kind_;
   uint8_t dimensions_;
-  const DexFile::ClassDef* class_def_;
+  const dex::ClassDef* class_def_;
 };
 
 inline bool IsGetMethod(VeriMethod method) {